xref: /openbmc/linux/drivers/scsi/be2iscsi/be_main.c (revision 95e9fd10)
1 /**
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11  *
12  * Contact Information:
13  * linux-drivers@emulex.com
14  *
15  * Emulex
16  * 3333 Susan Street
17  * Costa Mesa, CA 92626
18  */
19 
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29 #include <linux/iscsi_boot_sysfs.h>
30 #include <linux/module.h>
31 #include <linux/bsg-lib.h>
32 
33 #include <scsi/libiscsi.h>
34 #include <scsi/scsi_bsg_iscsi.h>
35 #include <scsi/scsi_netlink.h>
36 #include <scsi/scsi_transport_iscsi.h>
37 #include <scsi/scsi_transport.h>
38 #include <scsi/scsi_cmnd.h>
39 #include <scsi/scsi_device.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi.h>
42 #include "be_main.h"
43 #include "be_iscsi.h"
44 #include "be_mgmt.h"
45 
46 static unsigned int be_iopoll_budget = 10;
47 static unsigned int be_max_phys_size = 64;
48 static unsigned int enable_msix = 1;
49 static unsigned int gcrashmode = 0;
50 static unsigned int num_hba = 0;
51 
52 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
53 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
54 MODULE_VERSION(BUILD_STR);
55 MODULE_AUTHOR("Emulex Corporation");
56 MODULE_LICENSE("GPL");
57 module_param(be_iopoll_budget, int, 0);
58 module_param(enable_msix, int, 0);
59 module_param(be_max_phys_size, uint, S_IRUGO);
60 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
61 				   "contiguous memory that can be allocated."
62 				   "Range is 16 - 128");
63 
64 static int beiscsi_slave_configure(struct scsi_device *sdev)
65 {
66 	blk_queue_max_segment_size(sdev->request_queue, 65536);
67 	return 0;
68 }
69 
/**
 * beiscsi_eh_abort - SCSI error-handler abort entry point
 * @sc: the command to abort
 *
 * Locates the driver task behind @sc, asks the firmware over an MCC
 * "invalidate ICDs" request to cancel the outstanding I/O, then hands off
 * to libiscsi's iscsi_eh_abort() for protocol-level cleanup.
 *
 * Returns SUCCESS if the command already completed (we raced), FAILED on
 * allocation/submission failure, otherwise iscsi_eh_abort()'s verdict.
 */
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* Validate the task under the session lock: a NULL task or a task
	 * without a scsi_cmnd means the command already completed. */
	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	/* NOTE(review): aborted_task is dereferenced below after the session
	 * lock is dropped - presumably safe because SCSI EH is single-threaded
	 * per host, but worth confirming. */
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	/* Non-embedded MCC payload; must be DMA-able memory. */
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for"
			 "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be"
			     " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	} else {
		/* Block until the firmware completes the tagged MCC request. */
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}
141 
142 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
143 {
144 	struct iscsi_task *abrt_task;
145 	struct beiscsi_io_task *abrt_io_task;
146 	struct iscsi_conn *conn;
147 	struct beiscsi_conn *beiscsi_conn;
148 	struct beiscsi_hba *phba;
149 	struct iscsi_session *session;
150 	struct iscsi_cls_session *cls_session;
151 	struct invalidate_command_table *inv_tbl;
152 	struct be_dma_mem nonemb_cmd;
153 	unsigned int cid, tag, i, num_invalidate;
154 
155 	/* invalidate iocbs */
156 	cls_session = starget_to_session(scsi_target(sc->device));
157 	session = cls_session->dd_data;
158 	spin_lock_bh(&session->lock);
159 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
160 		spin_unlock_bh(&session->lock);
161 		return FAILED;
162 	}
163 	conn = session->leadconn;
164 	beiscsi_conn = conn->dd_data;
165 	phba = beiscsi_conn->phba;
166 	cid = beiscsi_conn->beiscsi_conn_cid;
167 	inv_tbl = phba->inv_tbl;
168 	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
169 	num_invalidate = 0;
170 	for (i = 0; i < conn->session->cmds_max; i++) {
171 		abrt_task = conn->session->cmds[i];
172 		abrt_io_task = abrt_task->dd_data;
173 		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
174 			continue;
175 
176 		if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
177 			continue;
178 
179 		inv_tbl->cid = cid;
180 		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
181 		num_invalidate++;
182 		inv_tbl++;
183 	}
184 	spin_unlock_bh(&session->lock);
185 	inv_tbl = phba->inv_tbl;
186 
187 	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
188 				sizeof(struct invalidate_commands_params_in),
189 				&nonemb_cmd.dma);
190 	if (nonemb_cmd.va == NULL) {
191 		SE_DEBUG(DBG_LVL_1,
192 			 "Failed to allocate memory for"
193 			 "mgmt_invalidate_icds\n");
194 		return FAILED;
195 	}
196 	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
197 	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
198 	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
199 				   cid, &nonemb_cmd);
200 	if (!tag) {
201 		shost_printk(KERN_WARNING, phba->shost,
202 			     "mgmt_invalidate_icds could not be"
203 			     " submitted\n");
204 		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
205 				    nonemb_cmd.va, nonemb_cmd.dma);
206 		return FAILED;
207 	} else {
208 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
209 					 phba->ctrl.mcc_numtag[tag]);
210 		free_mcc_tag(&phba->ctrl, tag);
211 	}
212 	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
213 			    nonemb_cmd.va, nonemb_cmd.dma);
214 	return iscsi_eh_device_reset(sc);
215 }
216 
217 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
218 {
219 	struct beiscsi_hba *phba = data;
220 	struct mgmt_session_info *boot_sess = &phba->boot_sess;
221 	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
222 	char *str = buf;
223 	int rc;
224 
225 	switch (type) {
226 	case ISCSI_BOOT_TGT_NAME:
227 		rc = sprintf(buf, "%.*s\n",
228 			    (int)strlen(boot_sess->target_name),
229 			    (char *)&boot_sess->target_name);
230 		break;
231 	case ISCSI_BOOT_TGT_IP_ADDR:
232 		if (boot_conn->dest_ipaddr.ip_type == 0x1)
233 			rc = sprintf(buf, "%pI4\n",
234 				(char *)&boot_conn->dest_ipaddr.addr);
235 		else
236 			rc = sprintf(str, "%pI6\n",
237 				(char *)&boot_conn->dest_ipaddr.addr);
238 		break;
239 	case ISCSI_BOOT_TGT_PORT:
240 		rc = sprintf(str, "%d\n", boot_conn->dest_port);
241 		break;
242 
243 	case ISCSI_BOOT_TGT_CHAP_NAME:
244 		rc = sprintf(str,  "%.*s\n",
245 			     boot_conn->negotiated_login_options.auth_data.chap.
246 			     target_chap_name_length,
247 			     (char *)&boot_conn->negotiated_login_options.
248 			     auth_data.chap.target_chap_name);
249 		break;
250 	case ISCSI_BOOT_TGT_CHAP_SECRET:
251 		rc = sprintf(str,  "%.*s\n",
252 			     boot_conn->negotiated_login_options.auth_data.chap.
253 			     target_secret_length,
254 			     (char *)&boot_conn->negotiated_login_options.
255 			     auth_data.chap.target_secret);
256 		break;
257 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
258 		rc = sprintf(str,  "%.*s\n",
259 			     boot_conn->negotiated_login_options.auth_data.chap.
260 			     intr_chap_name_length,
261 			     (char *)&boot_conn->negotiated_login_options.
262 			     auth_data.chap.intr_chap_name);
263 		break;
264 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
265 		rc = sprintf(str,  "%.*s\n",
266 			     boot_conn->negotiated_login_options.auth_data.chap.
267 			     intr_secret_length,
268 			     (char *)&boot_conn->negotiated_login_options.
269 			     auth_data.chap.intr_secret);
270 		break;
271 	case ISCSI_BOOT_TGT_FLAGS:
272 		rc = sprintf(str, "2\n");
273 		break;
274 	case ISCSI_BOOT_TGT_NIC_ASSOC:
275 		rc = sprintf(str, "0\n");
276 		break;
277 	default:
278 		rc = -ENOSYS;
279 		break;
280 	}
281 	return rc;
282 }
283 
284 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
285 {
286 	struct beiscsi_hba *phba = data;
287 	char *str = buf;
288 	int rc;
289 
290 	switch (type) {
291 	case ISCSI_BOOT_INI_INITIATOR_NAME:
292 		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
293 		break;
294 	default:
295 		rc = -ENOSYS;
296 		break;
297 	}
298 	return rc;
299 }
300 
301 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
302 {
303 	struct beiscsi_hba *phba = data;
304 	char *str = buf;
305 	int rc;
306 
307 	switch (type) {
308 	case ISCSI_BOOT_ETH_FLAGS:
309 		rc = sprintf(str, "2\n");
310 		break;
311 	case ISCSI_BOOT_ETH_INDEX:
312 		rc = sprintf(str, "0\n");
313 		break;
314 	case ISCSI_BOOT_ETH_MAC:
315 		rc  = beiscsi_get_macaddr(str, phba);
316 		break;
317 	default:
318 		rc = -ENOSYS;
319 		break;
320 	}
321 	return rc;
322 }
323 
324 
325 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
326 {
327 	umode_t rc;
328 
329 	switch (type) {
330 	case ISCSI_BOOT_TGT_NAME:
331 	case ISCSI_BOOT_TGT_IP_ADDR:
332 	case ISCSI_BOOT_TGT_PORT:
333 	case ISCSI_BOOT_TGT_CHAP_NAME:
334 	case ISCSI_BOOT_TGT_CHAP_SECRET:
335 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
336 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
337 	case ISCSI_BOOT_TGT_NIC_ASSOC:
338 	case ISCSI_BOOT_TGT_FLAGS:
339 		rc = S_IRUGO;
340 		break;
341 	default:
342 		rc = 0;
343 		break;
344 	}
345 	return rc;
346 }
347 
348 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
349 {
350 	umode_t rc;
351 
352 	switch (type) {
353 	case ISCSI_BOOT_INI_INITIATOR_NAME:
354 		rc = S_IRUGO;
355 		break;
356 	default:
357 		rc = 0;
358 		break;
359 	}
360 	return rc;
361 }
362 
363 
364 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
365 {
366 	umode_t rc;
367 
368 	switch (type) {
369 	case ISCSI_BOOT_ETH_FLAGS:
370 	case ISCSI_BOOT_ETH_MAC:
371 	case ISCSI_BOOT_ETH_INDEX:
372 		rc = S_IRUGO;
373 		break;
374 	default:
375 		rc = 0;
376 		break;
377 	}
378 	return rc;
379 }
380 
/*------------------- PCI Driver operations and data ----------------- */
/* PCI ids this driver binds to: BE2/BE3 (Emulex) and OneConnect variants. */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
391 
/*
 * SCSI midlayer host template: generic queueing/target handlers come from
 * libiscsi; slave_configure and the abort/device-reset EH hooks are
 * driver-specific (see beiscsi_eh_abort/beiscsi_eh_device_reset above).
 */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,

};

/* iSCSI transport template; presumably assigned during module init and
 * attached to each shost in beiscsi_hba_alloc() - see shost->transportt. */
static struct scsi_transport_template *beiscsi_scsi_transport;
414 
/**
 * beiscsi_hba_alloc - allocate and register a Scsi_Host plus driver state
 * @pcidev: PCI function the host is bound to
 *
 * Allocates an iSCSI host with the beiscsi_hba as its private data,
 * initializes host limits, takes a reference on @pcidev, and registers the
 * host with the midlayer.
 *
 * Returns the initialized beiscsi_hba, or NULL on failure (reference and
 * host are released on the error path).
 */
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
			"iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	/* Hold a reference on the PCI device for the lifetime of the host. */
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	/* 0xFFFFFFFF marks "no interface handle assigned yet". */
	phba->interface_handle = 0xFFFFFFFF;

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;

	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}
449 
450 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
451 {
452 	if (phba->csr_va) {
453 		iounmap(phba->csr_va);
454 		phba->csr_va = NULL;
455 	}
456 	if (phba->db_va) {
457 		iounmap(phba->db_va);
458 		phba->db_va = NULL;
459 	}
460 	if (phba->pci_va) {
461 		iounmap(phba->pci_va);
462 		phba->pci_va = NULL;
463 	}
464 }
465 
/**
 * beiscsi_map_pci_bars - ioremap the adapter's MMIO regions
 * @phba:   host state to populate
 * @pcidev: PCI function providing the BARs
 *
 * Maps BAR 2 (CSR), BAR 4 (doorbells, fixed 128 KB window) and the
 * PCI-config shadow BAR (BAR 1 on BE_GEN2, BAR 0 otherwise), recording both
 * the virtual cookies and bus addresses in @phba.
 *
 * Returns 0 on success or -ENOMEM; on failure any mappings made so far are
 * undone via beiscsi_unmap_pci_function().
 */
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	/* Doorbell BAR: only the first 128 KB window is mapped. */
	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);

	/* The PCI-config shadow lives in a different BAR per generation. */
	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
506 
507 static int beiscsi_enable_pci(struct pci_dev *pcidev)
508 {
509 	int ret;
510 
511 	ret = pci_enable_device(pcidev);
512 	if (ret) {
513 		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
514 			"failed. Returning -ENODEV\n");
515 		return ret;
516 	}
517 
518 	pci_set_master(pcidev);
519 	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
520 		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
521 		if (ret) {
522 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
523 			pci_disable_device(pcidev);
524 			return ret;
525 		}
526 	}
527 	return 0;
528 }
529 
/**
 * be_ctrl_init - set up the adapter control structure
 * @phba: host being initialized
 * @pdev: backing PCI device
 *
 * Maps the MMIO BARs, allocates the MCC mailbox from coherent DMA memory
 * (over-allocated by 16 bytes so a 16-byte-aligned view can be carved out
 * of it), and initializes the mailbox/MCC spinlocks.
 *
 * Returns 0 on success or a negative errno; BARs are unmapped if the
 * mailbox allocation fails.
 */
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	/* +16 leaves room to round the base up to 16-byte alignment below. */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	/* Aligned view used for actual mailbox traffic; the unaligned
	 * allocation above is what eventually gets freed. */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}
560 
561 static void beiscsi_get_params(struct beiscsi_hba *phba)
562 {
563 	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
564 				    - (phba->fw_config.iscsi_cid_count
565 				    + BE2_TMFS
566 				    + BE2_NOPOUT_REQ));
567 	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
568 	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
569 	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
570 	phba->params.num_sge_per_io = BE2_SGE;
571 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
572 	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
573 	phba->params.eq_timer = 64;
574 	phba->params.num_eq_entries =
575 	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
576 				    + BE2_TMFS) / 512) + 1) * 512;
577 	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
578 				? 1024 : phba->params.num_eq_entries;
579 	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
580 			     phba->params.num_eq_entries);
581 	phba->params.num_cq_entries =
582 	    (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
583 				    + BE2_TMFS) / 512) + 1) * 512;
584 	phba->params.wrbs_per_cxn = 256;
585 }
586 
587 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
588 			   unsigned int id, unsigned int clr_interrupt,
589 			   unsigned int num_processed,
590 			   unsigned char rearm, unsigned char event)
591 {
592 	u32 val = 0;
593 	val |= id & DB_EQ_RING_ID_MASK;
594 	if (rearm)
595 		val |= 1 << DB_EQ_REARM_SHIFT;
596 	if (clr_interrupt)
597 		val |= 1 << DB_EQ_CLR_SHIFT;
598 	if (event)
599 		val |= 1 << DB_EQ_EVNT_SHIFT;
600 	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
601 	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
602 }
603 
/**
 * be_isr_mcc - interrupt handler for the dedicated MCC event queue (MSI-X)
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj for the MCC EQ
 *
 * Drains valid EQEs; any entry whose resource id matches the MCC CQ sets
 * todo_mcc_cq so the workqueue handler processes the completions.  The EQ
 * doorbell is rung once at the end with rearm and event set.
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba =  pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;

	/* Consume EQEs until the valid bit clears (ring exhausted). */
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		/* Invalidate the entry before advancing past it. */
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}
650 
/**
 * be_isr_msix - interrupt handler for a per-CPU I/O event queue (MSI-X)
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj for this EQ
 *
 * With blk-iopoll enabled, completions are handled in polled context: each
 * valid EQE schedules the EQ's iopoll instance and the doorbell is rung
 * WITHOUT rearming (iopoll rearms when done).  Otherwise completions are
 * deferred to the workqueue and the doorbell is rung with rearm set.
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			/* Schedule iopoll once; further EQEs are absorbed
			 * while it is already pending/running. */
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}
710 
/**
 * be_isr - shared (INTx/MSI) interrupt handler covering MCC and I/O EQs
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 *
 * Reads the per-function ISR register to reject interrupts not meant for
 * us (IRQ_NONE).  EQEs whose resource id matches the MCC CQ are deferred
 * to the workqueue; the rest are handed to blk-iopoll (or also deferred
 * when iopoll is disabled).  The doorbell is rearmed only when there is no
 * iopoll work outstanding - iopoll rearms itself when it finishes.
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	/* Check this function's interrupt-status bits; 0 => not our IRQ. */
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				/* MCC completion: handled by the workqueue. */
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
			} else {
				/* I/O completion: handled by blk-iopoll. */
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
		}
		if (num_ioeq_processed || num_mcceq_processed) {
			if (phba->todo_mcc_cq)
				queue_work(phba->wq, &phba->work_cqs);

			/* Rearm only when no iopoll run is pending, i.e. the
			 * batch was MCC-only; otherwise iopoll rearms later. */
			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					      (num_ioeq_processed +
					       num_mcceq_processed) , 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);

			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {

			/* Anything not aimed at the I/O CQ is treated as an
			 * MCC completion. */
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}
818 
819 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
820 {
821 	struct pci_dev *pcidev = phba->pcidev;
822 	struct hwi_controller *phwi_ctrlr;
823 	struct hwi_context_memory *phwi_context;
824 	int ret, msix_vec, i, j;
825 
826 	phwi_ctrlr = phba->phwi_ctrlr;
827 	phwi_context = phwi_ctrlr->phwi_ctxt;
828 
829 	if (phba->msix_enabled) {
830 		for (i = 0; i < phba->num_cpus; i++) {
831 			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
832 						    GFP_KERNEL);
833 			if (!phba->msi_name[i]) {
834 				ret = -ENOMEM;
835 				goto free_msix_irqs;
836 			}
837 
838 			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
839 				phba->shost->host_no, i);
840 			msix_vec = phba->msix_entries[i].vector;
841 			ret = request_irq(msix_vec, be_isr_msix, 0,
842 					  phba->msi_name[i],
843 					  &phwi_context->be_eq[i]);
844 			if (ret) {
845 				shost_printk(KERN_ERR, phba->shost,
846 					     "beiscsi_init_irqs-Failed to"
847 					     "register msix for i = %d\n", i);
848 				kfree(phba->msi_name[i]);
849 				goto free_msix_irqs;
850 			}
851 		}
852 		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
853 		if (!phba->msi_name[i]) {
854 			ret = -ENOMEM;
855 			goto free_msix_irqs;
856 		}
857 		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
858 			phba->shost->host_no);
859 		msix_vec = phba->msix_entries[i].vector;
860 		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
861 				  &phwi_context->be_eq[i]);
862 		if (ret) {
863 			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
864 				     "Failed to register beiscsi_msix_mcc\n");
865 			kfree(phba->msi_name[i]);
866 			goto free_msix_irqs;
867 		}
868 
869 	} else {
870 		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
871 				  "beiscsi", phba);
872 		if (ret) {
873 			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
874 				     "Failed to register irq\\n");
875 			return ret;
876 		}
877 	}
878 	return 0;
879 free_msix_irqs:
880 	for (j = i - 1; j >= 0; j--) {
881 		kfree(phba->msi_name[j]);
882 		msix_vec = phba->msix_entries[j].vector;
883 		free_irq(msix_vec, &phwi_context->be_eq[j]);
884 	}
885 	return ret;
886 }
887 
888 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
889 			   unsigned int id, unsigned int num_processed,
890 			   unsigned char rearm, unsigned char event)
891 {
892 	u32 val = 0;
893 	val |= id & DB_CQ_RING_ID_MASK;
894 	if (rearm)
895 		val |= 1 << DB_CQ_REARM_SHIFT;
896 	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
897 	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
898 }
899 
/**
 * beiscsi_process_async_pdu - hand a firmware-received PDU to libiscsi
 * @beiscsi_conn: driver connection the PDU arrived on
 * @phba:         owning host
 * @cid:          connection id (unused here)
 * @ppdu:         PDU basic header
 * @pdu_len:      header length (unused here)
 * @pbuffer:      PDU data payload, if any
 * @buf_len:      payload length
 *
 * Pre-processes the PDU by opcode (drops NOOP-IN payloads, sanity-checks
 * REJECT, restores the libiscsi ITT on login/text responses) and then
 * completes it through __iscsi_complete_pdu() under the session lock.
 *
 * Returns 0 on success, 1 for an unrecognized opcode.
 */
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
						PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		/* NOOP-IN is completed without its payload. */
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		/* A reject PDU carries the 48-byte rejected BHS as payload. */
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		/* Firmware rewrites the ITT; restore the one libiscsi
		 * assigned so the response matches its login task. */
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg\n",
			     (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
						& PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}
948 
949 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
950 {
951 	struct sgl_handle *psgl_handle;
952 
953 	if (phba->io_sgl_hndl_avbl) {
954 		SE_DEBUG(DBG_LVL_8,
955 			 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
956 			 phba->io_sgl_alloc_index);
957 		psgl_handle = phba->io_sgl_hndl_base[phba->
958 						io_sgl_alloc_index];
959 		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
960 		phba->io_sgl_hndl_avbl--;
961 		if (phba->io_sgl_alloc_index == (phba->params.
962 						 ios_per_ctrl - 1))
963 			phba->io_sgl_alloc_index = 0;
964 		else
965 			phba->io_sgl_alloc_index++;
966 	} else
967 		psgl_handle = NULL;
968 	return psgl_handle;
969 }
970 
971 static void
972 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
973 {
974 	SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n",
975 		 phba->io_sgl_free_index);
976 	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
977 		/*
978 		 * this can happen if clean_task is called on a task that
979 		 * failed in xmit_task or alloc_pdu.
980 		 */
981 		 SE_DEBUG(DBG_LVL_8,
982 			 "Double Free in IO SGL io_sgl_free_index=%d,"
983 			 "value there=%p\n", phba->io_sgl_free_index,
984 			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
985 		return;
986 	}
987 	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
988 	phba->io_sgl_hndl_avbl++;
989 	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
990 		phba->io_sgl_free_index = 0;
991 	else
992 		phba->io_sgl_free_index++;
993 }
994 
/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 *
 * Takes the handle at the connection's alloc_index and links it to the
 * next ring entry via nxt_wrb_index.  At least two handles must remain
 * available so the "next" link always has a valid target.
 *
 * Returns the handle, or NULL when fewer than two handles are free.
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		/* advance alloc_index with wrap-around */
		if (pwrb_context->alloc_index ==
						(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
		/* chain this WRB to the following ring slot */
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
						pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}
1026 
1027 /**
1028  * free_wrb_handle - To free the wrb handle back to pool
1029  * @phba: The hba pointer
1030  * @pwrb_context: The context to free from
1031  * @pwrb_handle: The wrb_handle to free
1032  *
1033  * This happens under session_lock until submission to chip
1034  */
1035 static void
1036 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1037 		struct wrb_handle *pwrb_handle)
1038 {
1039 	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
1040 	pwrb_context->wrb_handles_available++;
1041 	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
1042 		pwrb_context->free_index = 0;
1043 	else
1044 		pwrb_context->free_index++;
1045 
1046 	SE_DEBUG(DBG_LVL_8,
1047 		 "FREE WRB: pwrb_handle=%p free_index=0x%x"
1048 		 "wrb_handles_available=%d\n",
1049 		 pwrb_handle, pwrb_context->free_index,
1050 		 pwrb_context->wrb_handles_available);
1051 }
1052 
1053 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1054 {
1055 	struct sgl_handle *psgl_handle;
1056 
1057 	if (phba->eh_sgl_hndl_avbl) {
1058 		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1059 		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
1060 		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
1061 			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
1062 		phba->eh_sgl_hndl_avbl--;
1063 		if (phba->eh_sgl_alloc_index ==
1064 		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1065 		     1))
1066 			phba->eh_sgl_alloc_index = 0;
1067 		else
1068 			phba->eh_sgl_alloc_index++;
1069 	} else
1070 		psgl_handle = NULL;
1071 	return psgl_handle;
1072 }
1073 
1074 void
1075 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1076 {
1077 
1078 	SE_DEBUG(DBG_LVL_8, "In  free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
1079 			     phba->eh_sgl_free_index);
1080 	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1081 		/*
1082 		 * this can happen if clean_task is called on a task that
1083 		 * failed in xmit_task or alloc_pdu.
1084 		 */
1085 		SE_DEBUG(DBG_LVL_8,
1086 			 "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
1087 			 phba->eh_sgl_free_index);
1088 		return;
1089 	}
1090 	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1091 	phba->eh_sgl_hndl_avbl++;
1092 	if (phba->eh_sgl_free_index ==
1093 	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1094 		phba->eh_sgl_free_index = 0;
1095 	else
1096 		phba->eh_sgl_free_index++;
1097 }
1098 
/*
 * be_complete_io - complete a solicited SCSI command from a CQE
 * @beiscsi_conn: connection the completion arrived on
 * @task: the libiscsi task being completed
 * @psol: the solicited completion queue entry from hardware
 *
 * Decodes status, response, flags and residual counts out of the raw
 * CQE dwords, copies sense data on CHECK CONDITION, unmaps the DMA
 * buffers and hands the task back to libiscsi.
 */
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	/* ExpCmdSN comes straight from the CQE's i_exp_cmd_sn field. */
	exp_cmdsn = (psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	/* MaxCmdSN = ExpCmdSN + command window - 1. */
	max_cmdsn = ((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
						& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
						& SOL_STS_MASK) >> 8);
	/* 0x80 is OR'd into flags — presumably ISCSI_FLAG_CMD_FINAL; confirm */
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	/* Task already torn down from the midlayer: just unmap and bail. */
	if (!task->sc) {
		if (io_task->scsi_cmnd)
			scsi_dma_unmap(io_task->scsi_cmnd);

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			/* Underflow below the midlayer's threshold is an error. */
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		/* sense_info starts with a big-endian length prefix. */
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	/* Account received data bytes for READs with a residual count. */
	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
							& SOL_RES_CNT_MASK)
			 conn->rxdata_octets += (psol->
			     dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			     & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
1174 
/*
 * be_complete_logout - build and deliver a logout response PDU
 * @beiscsi_conn: connection the completion arrived on
 * @task: the logout task being completed
 * @psol: the solicited completion queue entry from hardware
 *
 * Reconstructs an iscsi_logout_rsp header from the CQE fields and
 * passes it to libiscsi via __iscsi_complete_pdu().
 */
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	/*
	 * NOTE(review): max_cmdsn uses be32_to_cpu while exp_cmdsn uses
	 * cpu_to_be32; both are byte swaps on LE so the result is the
	 * same, but the asymmetry looks accidental — confirm intent.
	 * MaxCmdSN = ExpCmdSN + command window - 1.
	 */
	hdr->max_cmdsn = be32_to_cpu((psol->
			 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1206 
/*
 * be_complete_tmf - build and deliver a task management response PDU
 * @beiscsi_conn: connection the completion arrived on
 * @task: the TMF task being completed
 * @psol: the solicited completion queue entry from hardware
 *
 * Fills an iscsi_tm_rsp header from the CQE fields and hands it to
 * libiscsi via __iscsi_complete_pdu().
 */
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* MaxCmdSN = ExpCmdSN + command window - 1. */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1230 
/*
 * hwi_complete_drvr_msgs - recycle resources for a driver message CQE
 * @beiscsi_conn: connection the completion arrived on
 * @phba: the hba the CQE belongs to
 * @psol: the completion queue entry from hardware
 *
 * Looks up the WRB handle from the CID and wrb_index encoded in the
 * CQE, then returns the task's mgmt SGL handle and the WRB handle to
 * their pools. The mgmt SGL pool and the session WRB ring are guarded
 * by separate locks, taken one after the other.
 */
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	/* CID in the CQE is absolute; rebase it to this hba's cid range. */
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	spin_lock_bh(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock_bh(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}
1261 
/*
 * be_complete_nopin_resp - build and deliver a NOP-In response PDU
 * @beiscsi_conn: connection the completion arrived on
 * @task: the NOP task being completed
 * @psol: the solicited completion queue entry from hardware
 *
 * Fills an iscsi_nopin header from the CQE fields and hands it to
 * libiscsi via __iscsi_complete_pdu().
 */
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* MaxCmdSN = ExpCmdSN + command window - 1. */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1283 
1284 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1285 			     struct beiscsi_hba *phba, struct sol_cqe *psol)
1286 {
1287 	struct hwi_wrb_context *pwrb_context;
1288 	struct wrb_handle *pwrb_handle;
1289 	struct iscsi_wrb *pwrb = NULL;
1290 	struct hwi_controller *phwi_ctrlr;
1291 	struct iscsi_task *task;
1292 	unsigned int type;
1293 	struct iscsi_conn *conn = beiscsi_conn->conn;
1294 	struct iscsi_session *session = conn->session;
1295 
1296 	phwi_ctrlr = phba->phwi_ctrlr;
1297 	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1298 				(struct amap_sol_cqe, cid) / 32]
1299 				& SOL_CID_MASK) >> 6) -
1300 				phba->fw_config.iscsi_cid_start];
1301 	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1302 				dw[offsetof(struct amap_sol_cqe, wrb_index) /
1303 				32] & SOL_WRB_INDEX_MASK) >> 16)];
1304 	task = pwrb_handle->pio_handle;
1305 	pwrb = pwrb_handle->pwrb;
1306 	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1307 				 WRB_TYPE_MASK) >> 28;
1308 
1309 	spin_lock_bh(&session->lock);
1310 	switch (type) {
1311 	case HWH_TYPE_IO:
1312 	case HWH_TYPE_IO_RD:
1313 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1314 		     ISCSI_OP_NOOP_OUT)
1315 			be_complete_nopin_resp(beiscsi_conn, task, psol);
1316 		else
1317 			be_complete_io(beiscsi_conn, task, psol);
1318 		break;
1319 
1320 	case HWH_TYPE_LOGOUT:
1321 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1322 			be_complete_logout(beiscsi_conn, task, psol);
1323 		else
1324 			be_complete_tmf(beiscsi_conn, task, psol);
1325 
1326 		break;
1327 
1328 	case HWH_TYPE_LOGIN:
1329 		SE_DEBUG(DBG_LVL_1,
1330 			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1331 			 "- Solicited path\n");
1332 		break;
1333 
1334 	case HWH_TYPE_NOP:
1335 		be_complete_nopin_resp(beiscsi_conn, task, psol);
1336 		break;
1337 
1338 	default:
1339 		shost_printk(KERN_WARNING, phba->shost,
1340 				"In hwi_complete_cmd, unknown type = %d"
1341 				"wrb_index 0x%x CID 0x%x\n", type,
1342 				((psol->dw[offsetof(struct amap_iscsi_wrb,
1343 				type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1344 				((psol->dw[offsetof(struct amap_sol_cqe,
1345 				cid) / 32] & SOL_CID_MASK) >> 6));
1346 		break;
1347 	}
1348 
1349 	spin_unlock_bh(&session->lock);
1350 }
1351 
1352 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1353 					  *pasync_ctx, unsigned int is_header,
1354 					  unsigned int host_write_ptr)
1355 {
1356 	if (is_header)
1357 		return &pasync_ctx->async_entry[host_write_ptr].
1358 		    header_busy_list;
1359 	else
1360 		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1361 }
1362 
/*
 * hwi_get_async_handle - match a default PDU CQE to its buffer handle
 * @phba: the hba the CQE belongs to
 * @beiscsi_conn: connection the PDU arrived on
 * @pasync_ctx: the async PDU context to search
 * @pdpdu_cqe: the default PDU CQE from hardware
 * @pcq_index: out: the ring index carried in the CQE
 *
 * Reconstructs the buffer's bus address from the CQE, walks the
 * matching header/data busy list for the handle with that address,
 * and fills in the handle's cri, is_header and buffer_len fields.
 * Returns NULL on an unexpected CQE code.
 */
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned char is_header = 0;

	/*
	 * The CQE's db_addr_lo points past the PDU payload; subtract the
	 * data placement length (dpl) to recover the buffer's start.
	 */
	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	/*
	 * NOTE(review): this reads and writes the same union member — a
	 * self-assignment no-op; presumably vestigial. Confirm before
	 * removing.
	 */
	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));
		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			"Unexpected code=%d\n",
			 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	/* A posted buffer must be on the busy list; find it by address. */
	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
			break;
	}

	WARN_ON(!pasync_handle);

	/* Rebase the absolute cid to this hba's connection-resource index. */
	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}
1426 
/*
 * hwi_update_async_writables - advance the ring read pointer to a CQE index
 * @pasync_ctx: the async PDU context
 * @is_header: non-zero for the header ring, zero for the data ring
 * @cq_index: the ring index reported by the CQE
 *
 * Walks the endpoint read pointer forward (with wraparound) until it
 * reaches cq_index, marking the first handle on each traversed busy
 * list as consumed and counting the slots that became writable again.
 * Warns if no slot was advanced (a duplicate notification).
 * Always returns 0.
 */
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	num_entries = pasync_ctx->num_entries;
	/* Select the header or data ring's bookkeeping fields. */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
	}

	while ((*pep_read_ptr) != cq_index) {
		/* Step forward one slot, wrapping at num_entries. */
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
1475 
1476 static void hwi_free_async_msg(struct beiscsi_hba *phba,
1477 				       unsigned int cri)
1478 {
1479 	struct hwi_controller *phwi_ctrlr;
1480 	struct hwi_async_pdu_context *pasync_ctx;
1481 	struct async_pdu_handle *pasync_handle, *tmp_handle;
1482 	struct list_head *plist;
1483 
1484 	phwi_ctrlr = phba->phwi_ctrlr;
1485 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1486 
1487 	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1488 
1489 	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1490 		list_del(&pasync_handle->link);
1491 
1492 		if (pasync_handle->is_header) {
1493 			list_add_tail(&pasync_handle->link,
1494 				      &pasync_ctx->async_header.free_list);
1495 			pasync_ctx->async_header.free_entries++;
1496 		} else {
1497 			list_add_tail(&pasync_handle->link,
1498 				      &pasync_ctx->async_data.free_list);
1499 			pasync_ctx->async_data.free_entries++;
1500 		}
1501 	}
1502 
1503 	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1504 	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1505 	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1506 }
1507 
1508 static struct phys_addr *
1509 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1510 		     unsigned int is_header, unsigned int host_write_ptr)
1511 {
1512 	struct phys_addr *pasync_sge = NULL;
1513 
1514 	if (is_header)
1515 		pasync_sge = pasync_ctx->async_header.ring_base;
1516 	else
1517 		pasync_sge = pasync_ctx->async_data.ring_base;
1518 
1519 	return pasync_sge + host_write_ptr;
1520 }
1521 
/*
 * hwi_post_async_buffers - repost free buffers to a default PDU ring
 * @phba: the hba whose ring is refilled
 * @is_header: non-zero for the header ring, zero for the data ring
 *
 * Takes the smaller of the writable and free counts, rounds it down to
 * a multiple of 8, moves that many handles from the free list onto the
 * busy lists while writing their addresses into the ring SGEs, updates
 * the ring bookkeeping and rings the RX doorbell.
 */
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	num_entries = pasync_ctx->num_entries;

	/* Snapshot the selected ring's free list, write pointer and id. */
	if (is_header) {
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* Post in batches of 8; leftovers wait for the next call. */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			/*
			 * NOTE(review): hi is loaded from address_lo and
			 * lo from address_hi — looks swapped, but may
			 * match the hardware SGE layout; confirm against
			 * the SGE definition before changing.
			 */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		/* Commit counters for whichever ring was refilled. */
		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
							host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* Tell the chip how many entries were made available. */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
1602 
/*
 * hwi_flush_default_pdu_buffer - drop a default PDU after a digest error
 * @phba: the hba the CQE belongs to
 * @beiscsi_conn: connection the PDU arrived on
 * @pdpdu_cqe: the default PDU CQE to flush
 *
 * Resolves the buffer handle for the CQE (must be a data buffer),
 * updates the ring read pointer if the handle was not yet consumed,
 * frees everything queued for the connection and reposts buffers.
 */
static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	/* Flushing is only expected on the data ring. */
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);

	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1625 
/*
 * hwi_fwd_async_msg - assemble a queued async PDU and pass it upward
 * @beiscsi_conn: connection the PDU arrived on
 * @phba: the hba owning the async PDU context
 * @pasync_ctx: the async PDU context
 * @cri: connection resource index whose wait queue is forwarded
 *
 * The first handle on the wait queue is the PDU header; subsequent
 * handles carry data segments that are concatenated into the first
 * data buffer. The assembled PDU is handed to
 * beiscsi_process_async_pdu() and the buffers are recycled.
 * Always returns 0 (the status of beiscsi_process_async_pdu is read
 * into 'status' but not returned).
 */
static unsigned int
hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
		  struct beiscsi_hba *phba,
		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
{
	struct list_head *plist;
	struct async_pdu_handle *pasync_handle;
	void *phdr = NULL;
	unsigned int hdr_len = 0, buf_len = 0;
	unsigned int status, index = 0, offset = 0;
	void *pfirst_buffer = NULL;
	unsigned int num_buf = 0;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry(pasync_handle, plist, link) {
		if (index == 0) {
			/* Entry 0 is the PDU header. */
			phdr = pasync_handle->pbuffer;
			hdr_len = pasync_handle->buffer_len;
		} else {
			buf_len = pasync_handle->buffer_len;
			if (!num_buf) {
				pfirst_buffer = pasync_handle->pbuffer;
				num_buf++;
			}
			/*
			 * NOTE(review): for the first data buffer this
			 * memcpy copies the buffer onto itself at
			 * offset 0 (src == dst) — harmless in effect but
			 * worth confirming/cleaning up.
			 */
			memcpy(pfirst_buffer + offset,
			       pasync_handle->pbuffer, buf_len);
			offset += buf_len;
		}
		index++;
	}

	/* Rebase the absolute cid before handing the PDU to the driver. */
	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
					   (beiscsi_conn->beiscsi_conn_cid -
					    phba->fw_config.iscsi_cid_start),
					    phdr, hdr_len, pfirst_buffer,
					    offset);

	hwi_free_async_msg(phba, cri);
	return 0;
}
1667 
/*
 * hwi_gather_async_pdu - accumulate header/data pieces of an async PDU
 * @beiscsi_conn: connection the PDU arrived on
 * @phba: the hba owning the async PDU context
 * @pasync_handle: the newly arrived buffer handle
 *
 * Moves the handle from its busy list onto the per-cri wait queue.
 * For a header, the expected payload length is decoded from the PDU
 * BHS; for data, received bytes are accumulated. Once the wait queue
 * holds the full PDU, it is forwarded via hwi_fwd_async_msg().
 * Returns the forwarding status, or 0 if nothing was forwarded.
 */
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		/* A second header before the first PDU completed is fatal. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
				(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		/* Decode the 24-bit data length from the BHS hi/lo fields. */
		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
			    bytes_needed;

			/* Immediate PDU with no data segment: forward now. */
			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		/* Data without a header is silently dropped from the queue. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			/* Forward once all expected payload has arrived. */
			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}
1732 
/*
 * hwi_process_default_pdu_ring - handle an unsolicited PDU notification
 * @phba: the hba the CQE belongs to
 * @beiscsi_conn: connection the PDU arrived on
 * @pdpdu_cqe: the default PDU CQE from hardware
 *
 * Resolves the buffer handle for the CQE, advances the ring read
 * pointer if not yet consumed, gathers the PDU pieces and reposts
 * free buffers to the ring.
 */
static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1753 
/*
 * beiscsi_process_mcc_isr - drain the MCC completion queue
 * @phba: the hba whose MCC CQ is processed
 *
 * Walks valid MCC completions: async entries are checked for link
 * state events; regular completions are handed to
 * be_mcc_compl_process_isr(). The CQ doorbell is rung every 32
 * entries (without rearm) and once at the end (with rearm).
 */
static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct  be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {

		/* Ring the doorbell periodically so the chip can reuse CQEs. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
					num_processed, 0, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(mcc_compl->flags))
				/* Interpret compl as a async link evt */
				beiscsi_async_link_state_process(phba,
				(struct be_async_event_link_state *) mcc_compl);
			else
				SE_DEBUG(DBG_LVL_1,
					" Unsupported Async Event, flags"
					" = 0x%08x\n", mcc_compl->flags);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}

		/* Invalidate the CQE and step to the next one. */
		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);

}
1796 
/*
 * beiscsi_process_cq - drain an event queue's completion queue
 * @pbe_eq: the event queue object whose CQ is processed
 *
 * Walks valid CQEs, dispatching each by its code: command completions,
 * driver messages, unsolicited PDUs, and a range of error codes (some
 * logged only, some triggering iscsi_conn_failure). The CQ doorbell is
 * rung every 32 entries (without rearm) and once at the end (with
 * rearm). Returns the total number of entries processed.
 */
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		/* CQEs arrive little-endian; convert in place. */
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		/* Map the CQE's absolute cid to this hba's endpoint array. */
		ep = phba->ep_array[(u32) ((sol->
				   dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				   SOL_CID_MASK) >> 6) -
				   phba->fw_config.iscsi_cid_start];

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* Periodic doorbell (no rearm) so the chip can reuse CQEs. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, cq->id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
			/* NOTE(review): dmsg is assigned but never used. */
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 "invalidate\n");
			break;
		/* Per-command errors: log only, command is already dead. */
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
					     (struct i_t_dpdu_cqe *) sol);
			break;
		/* Connection-fatal errors: fail the iSCSI connection. */
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
				"received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			break;
		}

		/* Invalidate the CQE and step to the next one. */
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	if (num_processed > 0) {
		tot_nump += num_processed;
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
	}
	return tot_nump;
}
1937 
/*
 * beiscsi_process_all_cqs()- Work-queue bottom half that drains CQ work.
 * @work: embedded work_struct inside struct beiscsi_hba (work_cqs)
 *
 * Checks the todo_mcc_cq/todo_cq flags and runs the corresponding
 * completion processing.  Each flag is cleared under isr_lock before the
 * (potentially long) processing call, so new events can re-mark it.
 */
void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba =
	    container_of(work, struct beiscsi_hba, work_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* With MSI-X, EQ index num_cpus is the extra (MCC) event queue;
	 * in INTx mode everything is serviced through EQ 0.
	 */
	if (phba->msix_enabled)
		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
	else
		pbe_eq = &phwi_context->be_eq[0];

	/* NOTE(review): todo_* flags are presumably set by the ISR under
	 * isr_lock — clear-then-process keeps the window for lost wakeups
	 * small; confirm against the interrupt handler.
	 */
	if (phba->todo_mcc_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_mcc_isr(phba);
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(pbe_eq);
	}
}
1968 
1969 static int be_iopoll(struct blk_iopoll *iop, int budget)
1970 {
1971 	static unsigned int ret;
1972 	struct beiscsi_hba *phba;
1973 	struct be_eq_obj *pbe_eq;
1974 
1975 	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1976 	ret = beiscsi_process_cq(pbe_eq);
1977 	if (ret < budget) {
1978 		phba = pbe_eq->phba;
1979 		blk_iopoll_complete(iop);
1980 		SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1981 		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1982 	}
1983 	return ret;
1984 }
1985 
/*
 * hwi_write_sgl()- Fill the WRB and the task's SGL from a mapped sg list.
 * @pwrb:    work request block to populate
 * @sg:      first entry of the DMA-mapped scatterlist
 * @num_sg:  number of mapped entries in @sg
 * @io_task: task providing the BHS address and the SGL fragment page
 *
 * The first two data fragments are written inline into the WRB
 * (sge0/sge1); the complete fragment list is then written into the
 * task's SGL page, whose first entry always describes the BHS.
 */
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	/* Point the WRB at the basic header segment of this task. */
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
				      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
				      io_task->bhs_pa.u.a32.address_hi);

	/* Inline up to the first two fragments directly into the WRB. */
	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
							 sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
						((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
							((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
							sg_len);
			sge_len = sg_len;
		} else {
			/* sge1's r2t_offset is where sge0's data ended. */
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
							pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
						((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
							((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
							sg_len);
		}
	}
	/* SGL entry 0 describes the BHS (minus the 2-byte adjustment). */
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			io_task->bhs_pa.u.a32.address_lo);

	/* Mark which (if any) of the inline WRB SGEs is the last one. */
	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	}
	/* Restart at the head of the list and write every fragment into
	 * the SGL page, skipping the two entries reserved for the BHS.
	 */
	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
						(addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
						(addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	/* Flag the final fragment written above as the last SGE. */
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
2073 
2074 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2075 {
2076 	struct iscsi_sge *psgl;
2077 	unsigned long long addr;
2078 	struct beiscsi_io_task *io_task = task->dd_data;
2079 	struct beiscsi_conn *beiscsi_conn = io_task->conn;
2080 	struct beiscsi_hba *phba = beiscsi_conn->phba;
2081 
2082 	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2083 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2084 				io_task->bhs_pa.u.a32.address_lo);
2085 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2086 				io_task->bhs_pa.u.a32.address_hi);
2087 
2088 	if (task->data) {
2089 		if (task->data_count) {
2090 			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
2091 			addr = (u64) pci_map_single(phba->pcidev,
2092 						    task->data,
2093 						    task->data_count, 1);
2094 		} else {
2095 			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2096 			addr = 0;
2097 		}
2098 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2099 						((u32)(addr & 0xFFFFFFFF)));
2100 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2101 						((u32)(addr >> 32)));
2102 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2103 						task->data_count);
2104 
2105 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2106 	} else {
2107 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2108 		addr = 0;
2109 	}
2110 
2111 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2112 
2113 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2114 
2115 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2116 		      io_task->bhs_pa.u.a32.address_hi);
2117 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2118 		      io_task->bhs_pa.u.a32.address_lo);
2119 	if (task->data) {
2120 		psgl++;
2121 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2122 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2123 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2124 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2125 		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2126 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2127 
2128 		psgl++;
2129 		if (task->data) {
2130 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2131 						((u32)(addr & 0xFFFFFFFF)));
2132 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2133 						((u32)(addr >> 32)));
2134 		}
2135 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2136 	}
2137 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2138 }
2139 
2140 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2141 {
2142 	unsigned int num_cq_pages, num_async_pdu_buf_pages;
2143 	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2144 	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2145 
2146 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2147 				      sizeof(struct sol_cqe));
2148 	num_async_pdu_buf_pages =
2149 			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2150 				       phba->params.defpdu_hdr_sz);
2151 	num_async_pdu_buf_sgl_pages =
2152 			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2153 				       sizeof(struct phys_addr));
2154 	num_async_pdu_data_pages =
2155 			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2156 				       phba->params.defpdu_data_sz);
2157 	num_async_pdu_data_sgl_pages =
2158 			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2159 				       sizeof(struct phys_addr));
2160 
2161 	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2162 
2163 	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2164 						 BE_ISCSI_PDU_HEADER_SIZE;
2165 	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2166 					    sizeof(struct hwi_context_memory);
2167 
2168 
2169 	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2170 	    * (phba->params.wrbs_per_cxn)
2171 	    * phba->params.cxns_per_ctrl;
2172 	wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
2173 				 (phba->params.wrbs_per_cxn);
2174 	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2175 				phba->params.cxns_per_ctrl);
2176 
2177 	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2178 		phba->params.icds_per_ctrl;
2179 	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2180 		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2181 
2182 	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2183 		num_async_pdu_buf_pages * PAGE_SIZE;
2184 	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2185 		num_async_pdu_data_pages * PAGE_SIZE;
2186 	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2187 		num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2188 	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2189 		num_async_pdu_data_sgl_pages * PAGE_SIZE;
2190 	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2191 		phba->params.asyncpdus_per_ctrl *
2192 		sizeof(struct async_pdu_handle);
2193 	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2194 		phba->params.asyncpdus_per_ctrl *
2195 		sizeof(struct async_pdu_handle);
2196 	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2197 		sizeof(struct hwi_async_pdu_context) +
2198 		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2199 }
2200 
2201 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2202 {
2203 	struct be_mem_descriptor *mem_descr;
2204 	dma_addr_t bus_add;
2205 	struct mem_array *mem_arr, *mem_arr_orig;
2206 	unsigned int i, j, alloc_size, curr_alloc_size;
2207 
2208 	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2209 	if (!phba->phwi_ctrlr)
2210 		return -ENOMEM;
2211 
2212 	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2213 				 GFP_KERNEL);
2214 	if (!phba->init_mem) {
2215 		kfree(phba->phwi_ctrlr);
2216 		return -ENOMEM;
2217 	}
2218 
2219 	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2220 			       GFP_KERNEL);
2221 	if (!mem_arr_orig) {
2222 		kfree(phba->init_mem);
2223 		kfree(phba->phwi_ctrlr);
2224 		return -ENOMEM;
2225 	}
2226 
2227 	mem_descr = phba->init_mem;
2228 	for (i = 0; i < SE_MEM_MAX; i++) {
2229 		j = 0;
2230 		mem_arr = mem_arr_orig;
2231 		alloc_size = phba->mem_req[i];
2232 		memset(mem_arr, 0, sizeof(struct mem_array) *
2233 		       BEISCSI_MAX_FRAGS_INIT);
2234 		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2235 		do {
2236 			mem_arr->virtual_address = pci_alloc_consistent(
2237 							phba->pcidev,
2238 							curr_alloc_size,
2239 							&bus_add);
2240 			if (!mem_arr->virtual_address) {
2241 				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2242 					goto free_mem;
2243 				if (curr_alloc_size -
2244 					rounddown_pow_of_two(curr_alloc_size))
2245 					curr_alloc_size = rounddown_pow_of_two
2246 							     (curr_alloc_size);
2247 				else
2248 					curr_alloc_size = curr_alloc_size / 2;
2249 			} else {
2250 				mem_arr->bus_address.u.
2251 				    a64.address = (__u64) bus_add;
2252 				mem_arr->size = curr_alloc_size;
2253 				alloc_size -= curr_alloc_size;
2254 				curr_alloc_size = min(be_max_phys_size *
2255 						      1024, alloc_size);
2256 				j++;
2257 				mem_arr++;
2258 			}
2259 		} while (alloc_size);
2260 		mem_descr->num_elements = j;
2261 		mem_descr->size_in_bytes = phba->mem_req[i];
2262 		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2263 					       GFP_KERNEL);
2264 		if (!mem_descr->mem_array)
2265 			goto free_mem;
2266 
2267 		memcpy(mem_descr->mem_array, mem_arr_orig,
2268 		       sizeof(struct mem_array) * j);
2269 		mem_descr++;
2270 	}
2271 	kfree(mem_arr_orig);
2272 	return 0;
2273 free_mem:
2274 	mem_descr->num_elements = j;
2275 	while ((i) || (j)) {
2276 		for (j = mem_descr->num_elements; j > 0; j--) {
2277 			pci_free_consistent(phba->pcidev,
2278 					    mem_descr->mem_array[j - 1].size,
2279 					    mem_descr->mem_array[j - 1].
2280 					    virtual_address,
2281 					    (unsigned long)mem_descr->
2282 					    mem_array[j - 1].
2283 					    bus_address.u.a64.address);
2284 		}
2285 		if (i) {
2286 			i--;
2287 			kfree(mem_descr->mem_array);
2288 			mem_descr--;
2289 		}
2290 	}
2291 	kfree(mem_arr_orig);
2292 	kfree(phba->init_mem);
2293 	kfree(phba->phwi_ctrlr);
2294 	return -ENOMEM;
2295 }
2296 
/* Size all driver memory regions, then allocate them in one pass. */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	beiscsi_find_mem_req(phba);

	return beiscsi_alloc_mem(phba);
}
2302 
2303 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2304 {
2305 	struct pdu_data_out *pdata_out;
2306 	struct pdu_nop_out *pnop_out;
2307 	struct be_mem_descriptor *mem_descr;
2308 
2309 	mem_descr = phba->init_mem;
2310 	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2311 	pdata_out =
2312 	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2313 	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2314 
2315 	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2316 		      IIOC_SCSI_DATA);
2317 
2318 	pnop_out =
2319 	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2320 				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2321 
2322 	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2323 	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2324 	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2325 	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2326 }
2327 
2328 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2329 {
2330 	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2331 	struct wrb_handle *pwrb_handle = NULL;
2332 	struct hwi_controller *phwi_ctrlr;
2333 	struct hwi_wrb_context *pwrb_context;
2334 	struct iscsi_wrb *pwrb = NULL;
2335 	unsigned int num_cxn_wrbh = 0;
2336 	unsigned int num_cxn_wrb = 0, j, idx = 0, index;
2337 
2338 	mem_descr_wrbh = phba->init_mem;
2339 	mem_descr_wrbh += HWI_MEM_WRBH;
2340 
2341 	mem_descr_wrb = phba->init_mem;
2342 	mem_descr_wrb += HWI_MEM_WRB;
2343 	phwi_ctrlr = phba->phwi_ctrlr;
2344 
2345 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2346 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2347 		pwrb_context->pwrb_handle_base =
2348 				kzalloc(sizeof(struct wrb_handle *) *
2349 					phba->params.wrbs_per_cxn, GFP_KERNEL);
2350 		if (!pwrb_context->pwrb_handle_base) {
2351 			shost_printk(KERN_ERR, phba->shost,
2352 					"Mem Alloc Failed. Failing to load\n");
2353 			goto init_wrb_hndl_failed;
2354 		}
2355 		pwrb_context->pwrb_handle_basestd =
2356 				kzalloc(sizeof(struct wrb_handle *) *
2357 					phba->params.wrbs_per_cxn, GFP_KERNEL);
2358 		if (!pwrb_context->pwrb_handle_basestd) {
2359 			shost_printk(KERN_ERR, phba->shost,
2360 					"Mem Alloc Failed. Failing to load\n");
2361 			goto init_wrb_hndl_failed;
2362 		}
2363 		if (!num_cxn_wrbh) {
2364 			pwrb_handle =
2365 				mem_descr_wrbh->mem_array[idx].virtual_address;
2366 			num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2367 					((sizeof(struct wrb_handle)) *
2368 					 phba->params.wrbs_per_cxn));
2369 			idx++;
2370 		}
2371 		pwrb_context->alloc_index = 0;
2372 		pwrb_context->wrb_handles_available = 0;
2373 		pwrb_context->free_index = 0;
2374 
2375 		if (num_cxn_wrbh) {
2376 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2377 				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2378 				pwrb_context->pwrb_handle_basestd[j] =
2379 								pwrb_handle;
2380 				pwrb_context->wrb_handles_available++;
2381 				pwrb_handle->wrb_index = j;
2382 				pwrb_handle++;
2383 			}
2384 			num_cxn_wrbh--;
2385 		}
2386 	}
2387 	idx = 0;
2388 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2389 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2390 		if (!num_cxn_wrb) {
2391 			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2392 			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2393 				((sizeof(struct iscsi_wrb) *
2394 				  phba->params.wrbs_per_cxn));
2395 			idx++;
2396 		}
2397 
2398 		if (num_cxn_wrb) {
2399 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2400 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2401 				pwrb_handle->pwrb = pwrb;
2402 				pwrb++;
2403 			}
2404 			num_cxn_wrb--;
2405 		}
2406 	}
2407 	return 0;
2408 init_wrb_hndl_failed:
2409 	for (j = index; j > 0; j--) {
2410 		pwrb_context = &phwi_ctrlr->wrb_context[j];
2411 		kfree(pwrb_context->pwrb_handle_base);
2412 		kfree(pwrb_context->pwrb_handle_basestd);
2413 	}
2414 	return -ENOMEM;
2415 }
2416 
/*
 * hwi_init_async_pdu_ctx()- Wire up the async (default) PDU context.
 * @phba: adapter instance whose init_mem regions are already allocated
 *
 * Lays the hwi_async_pdu_context over HWI_MEM_ASYNC_PDU_CONTEXT and
 * links it to the header/data buffer, ring and handle regions, then
 * builds the free lists of header and data PDU handles.
 *
 * NOTE(review): the "No Virtual address" warnings below do not abort —
 * the NULL pointers are still stored and used; presumably allocation
 * failure was caught earlier in beiscsi_alloc_mem().
 */
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index, idx, num_per_mem, num_async_data;
	struct be_mem_descriptor *mem_descr;

	/* The context structure itself lives in HWI_MEM_ASYNC_PDU_CONTEXT. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
				mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->buffer_size = p->defpdu_hdr_sz;

	/* Header PDU buffer pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_header.va_base =
			mem_descr->mem_array[0].virtual_address;

	pasync_ctx->async_header.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Header descriptor ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address\n");
	pasync_ctx->async_header.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Header handle array. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address\n");

	pasync_ctx->async_header.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);


	/* Data descriptor ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_data.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Data handle array. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address\n");

	pasync_ctx->async_data.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	/* Data PDU buffer pool; may span several DMA fragments, walked via
	 * idx/num_per_mem below.
	 */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address\n");
	idx = 0;
	pasync_ctx->async_data.va_base =
			mem_descr->mem_array[idx].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
			mem_descr->mem_array[idx].bus_address.u.a64.address;

	/* Number of data PDUs the current fragment can hold. */
	num_async_data = ((mem_descr->mem_array[idx].size) /
				phba->params.defpdu_data_sz);
	num_per_mem = 0;

	/* Populate one header handle and one data handle per async PDU and
	 * put both on their free lists.
	 */
	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
				&pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);

		/* Current data fragment exhausted: advance to the next one. */
		if (!num_async_data) {
			num_per_mem = 0;
			idx++;
			pasync_ctx->async_data.va_base =
				mem_descr->mem_array[idx].virtual_address;
			pasync_ctx->async_data.pa_base.u.a64.address =
				mem_descr->mem_array[idx].
				bus_address.u.a64.address;

			num_async_data = ((mem_descr->mem_array[idx].size) /
					phba->params.defpdu_data_sz);
		}
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * num_per_mem));

		pasync_data_h->pa.u.a64.address =
		    pasync_ctx->async_data.pa_base.u.a64.address +
		    (p->defpdu_data_sz * num_per_mem);
		num_per_mem++;
		num_async_data--;

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
2593 
2594 static int
2595 be_sgl_create_contiguous(void *virtual_address,
2596 			 u64 physical_address, u32 length,
2597 			 struct be_dma_mem *sgl)
2598 {
2599 	WARN_ON(!virtual_address);
2600 	WARN_ON(!physical_address);
2601 	WARN_ON(!length > 0);
2602 	WARN_ON(!sgl);
2603 
2604 	sgl->va = virtual_address;
2605 	sgl->dma = (unsigned long)physical_address;
2606 	sgl->size = length;
2607 
2608 	return 0;
2609 }
2610 
2611 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2612 {
2613 	memset(sgl, 0, sizeof(*sgl));
2614 }
2615 
2616 static void
2617 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2618 		     struct mem_array *pmem, struct be_dma_mem *sgl)
2619 {
2620 	if (sgl->va)
2621 		be_sgl_destroy_contiguous(sgl);
2622 
2623 	be_sgl_create_contiguous(pmem->virtual_address,
2624 				 pmem->bus_address.u.a64.address,
2625 				 pmem->size, sgl);
2626 }
2627 
2628 static void
2629 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2630 			   struct mem_array *pmem, struct be_dma_mem *sgl)
2631 {
2632 	if (sgl->va)
2633 		be_sgl_destroy_contiguous(sgl);
2634 
2635 	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2636 				 pmem->bus_address.u.a64.address,
2637 				 pmem->size, sgl);
2638 }
2639 
2640 static int be_fill_queue(struct be_queue_info *q,
2641 		u16 len, u16 entry_size, void *vaddress)
2642 {
2643 	struct be_dma_mem *mem = &q->dma_mem;
2644 
2645 	memset(q, 0, sizeof(*q));
2646 	q->len = len;
2647 	q->entry_size = entry_size;
2648 	mem->size = len * entry_size;
2649 	mem->va = vaddress;
2650 	if (!mem->va)
2651 		return -ENOMEM;
2652 	memset(mem->va, 0, mem->size);
2653 	return 0;
2654 }
2655 
2656 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2657 			     struct hwi_context_memory *phwi_context)
2658 {
2659 	unsigned int i, num_eq_pages;
2660 	int ret, eq_for_mcc;
2661 	struct be_queue_info *eq;
2662 	struct be_dma_mem *mem;
2663 	void *eq_vaddress;
2664 	dma_addr_t paddr;
2665 
2666 	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2667 				      sizeof(struct be_eq_entry));
2668 
2669 	if (phba->msix_enabled)
2670 		eq_for_mcc = 1;
2671 	else
2672 		eq_for_mcc = 0;
2673 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2674 		eq = &phwi_context->be_eq[i].q;
2675 		mem = &eq->dma_mem;
2676 		phwi_context->be_eq[i].phba = phba;
2677 		eq_vaddress = pci_alloc_consistent(phba->pcidev,
2678 						     num_eq_pages * PAGE_SIZE,
2679 						     &paddr);
2680 		if (!eq_vaddress)
2681 			goto create_eq_error;
2682 
2683 		mem->va = eq_vaddress;
2684 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
2685 				    sizeof(struct be_eq_entry), eq_vaddress);
2686 		if (ret) {
2687 			shost_printk(KERN_ERR, phba->shost,
2688 				     "be_fill_queue Failed for EQ\n");
2689 			goto create_eq_error;
2690 		}
2691 
2692 		mem->dma = paddr;
2693 		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2694 					    phwi_context->cur_eqd);
2695 		if (ret) {
2696 			shost_printk(KERN_ERR, phba->shost,
2697 				     "beiscsi_cmd_eq_create"
2698 				     "Failedfor EQ\n");
2699 			goto create_eq_error;
2700 		}
2701 		SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2702 	}
2703 	return 0;
2704 create_eq_error:
2705 	for (i = 0; i < (phba->num_cpus + 1); i++) {
2706 		eq = &phwi_context->be_eq[i].q;
2707 		mem = &eq->dma_mem;
2708 		if (mem->va)
2709 			pci_free_consistent(phba->pcidev, num_eq_pages
2710 					    * PAGE_SIZE,
2711 					    mem->va, mem->dma);
2712 	}
2713 	return ret;
2714 }
2715 
2716 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2717 			     struct hwi_context_memory *phwi_context)
2718 {
2719 	unsigned int i, num_cq_pages;
2720 	int ret;
2721 	struct be_queue_info *cq, *eq;
2722 	struct be_dma_mem *mem;
2723 	struct be_eq_obj *pbe_eq;
2724 	void *cq_vaddress;
2725 	dma_addr_t paddr;
2726 
2727 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2728 				      sizeof(struct sol_cqe));
2729 
2730 	for (i = 0; i < phba->num_cpus; i++) {
2731 		cq = &phwi_context->be_cq[i];
2732 		eq = &phwi_context->be_eq[i].q;
2733 		pbe_eq = &phwi_context->be_eq[i];
2734 		pbe_eq->cq = cq;
2735 		pbe_eq->phba = phba;
2736 		mem = &cq->dma_mem;
2737 		cq_vaddress = pci_alloc_consistent(phba->pcidev,
2738 						     num_cq_pages * PAGE_SIZE,
2739 						     &paddr);
2740 		if (!cq_vaddress)
2741 			goto create_cq_error;
2742 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
2743 				    sizeof(struct sol_cqe), cq_vaddress);
2744 		if (ret) {
2745 			shost_printk(KERN_ERR, phba->shost,
2746 				     "be_fill_queue Failed for ISCSI CQ\n");
2747 			goto create_cq_error;
2748 		}
2749 
2750 		mem->dma = paddr;
2751 		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2752 					    false, 0);
2753 		if (ret) {
2754 			shost_printk(KERN_ERR, phba->shost,
2755 				     "beiscsi_cmd_eq_create"
2756 				     "Failed for ISCSI CQ\n");
2757 			goto create_cq_error;
2758 		}
2759 		SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2760 						 cq->id, eq->id);
2761 		SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2762 	}
2763 	return 0;
2764 
2765 create_cq_error:
2766 	for (i = 0; i < phba->num_cpus; i++) {
2767 		cq = &phwi_context->be_cq[i];
2768 		mem = &cq->dma_mem;
2769 		if (mem->va)
2770 			pci_free_consistent(phba->pcidev, num_cq_pages
2771 					    * PAGE_SIZE,
2772 					    mem->va, mem->dma);
2773 	}
2774 	return ret;
2775 
2776 }
2777 
2778 static int
2779 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2780 		       struct hwi_context_memory *phwi_context,
2781 		       struct hwi_controller *phwi_ctrlr,
2782 		       unsigned int def_pdu_ring_sz)
2783 {
2784 	unsigned int idx;
2785 	int ret;
2786 	struct be_queue_info *dq, *cq;
2787 	struct be_dma_mem *mem;
2788 	struct be_mem_descriptor *mem_descr;
2789 	void *dq_vaddress;
2790 
2791 	idx = 0;
2792 	dq = &phwi_context->be_def_hdrq;
2793 	cq = &phwi_context->be_cq[0];
2794 	mem = &dq->dma_mem;
2795 	mem_descr = phba->init_mem;
2796 	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2797 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2798 	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2799 			    sizeof(struct phys_addr),
2800 			    sizeof(struct phys_addr), dq_vaddress);
2801 	if (ret) {
2802 		shost_printk(KERN_ERR, phba->shost,
2803 			     "be_fill_queue Failed for DEF PDU HDR\n");
2804 		return ret;
2805 	}
2806 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
2807 				  bus_address.u.a64.address;
2808 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2809 					      def_pdu_ring_sz,
2810 					      phba->params.defpdu_hdr_sz);
2811 	if (ret) {
2812 		shost_printk(KERN_ERR, phba->shost,
2813 			     "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2814 		return ret;
2815 	}
2816 	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2817 	SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2818 		 phwi_context->be_def_hdrq.id);
2819 	hwi_post_async_buffers(phba, 1);
2820 	return 0;
2821 }
2822 
2823 static int
2824 beiscsi_create_def_data(struct beiscsi_hba *phba,
2825 			struct hwi_context_memory *phwi_context,
2826 			struct hwi_controller *phwi_ctrlr,
2827 			unsigned int def_pdu_ring_sz)
2828 {
2829 	unsigned int idx;
2830 	int ret;
2831 	struct be_queue_info *dataq, *cq;
2832 	struct be_dma_mem *mem;
2833 	struct be_mem_descriptor *mem_descr;
2834 	void *dq_vaddress;
2835 
2836 	idx = 0;
2837 	dataq = &phwi_context->be_def_dataq;
2838 	cq = &phwi_context->be_cq[0];
2839 	mem = &dataq->dma_mem;
2840 	mem_descr = phba->init_mem;
2841 	mem_descr += HWI_MEM_ASYNC_DATA_RING;
2842 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2843 	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2844 			    sizeof(struct phys_addr),
2845 			    sizeof(struct phys_addr), dq_vaddress);
2846 	if (ret) {
2847 		shost_printk(KERN_ERR, phba->shost,
2848 			     "be_fill_queue Failed for DEF PDU DATA\n");
2849 		return ret;
2850 	}
2851 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
2852 				  bus_address.u.a64.address;
2853 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2854 					      def_pdu_ring_sz,
2855 					      phba->params.defpdu_data_sz);
2856 	if (ret) {
2857 		shost_printk(KERN_ERR, phba->shost,
2858 			     "be_cmd_create_default_pdu_queue Failed"
2859 			     " for DEF PDU DATA\n");
2860 		return ret;
2861 	}
2862 	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2863 	SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2864 		 phwi_context->be_def_dataq.id);
2865 	hwi_post_async_buffers(phba, 0);
2866 	SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2867 	return 0;
2868 }
2869 
2870 static int
2871 beiscsi_post_pages(struct beiscsi_hba *phba)
2872 {
2873 	struct be_mem_descriptor *mem_descr;
2874 	struct mem_array *pm_arr;
2875 	unsigned int page_offset, i;
2876 	struct be_dma_mem sgl;
2877 	int status;
2878 
2879 	mem_descr = phba->init_mem;
2880 	mem_descr += HWI_MEM_SGE;
2881 	pm_arr = mem_descr->mem_array;
2882 
2883 	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2884 			phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2885 	for (i = 0; i < mem_descr->num_elements; i++) {
2886 		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2887 		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2888 						page_offset,
2889 						(pm_arr->size / PAGE_SIZE));
2890 		page_offset += pm_arr->size / PAGE_SIZE;
2891 		if (status != 0) {
2892 			shost_printk(KERN_ERR, phba->shost,
2893 				     "post sgl failed.\n");
2894 			return status;
2895 		}
2896 		pm_arr++;
2897 	}
2898 	SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2899 	return 0;
2900 }
2901 
2902 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2903 {
2904 	struct be_dma_mem *mem = &q->dma_mem;
2905 	if (mem->va) {
2906 		pci_free_consistent(phba->pcidev, mem->size,
2907 			mem->va, mem->dma);
2908 		mem->va = NULL;
2909 	}
2910 }
2911 
2912 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2913 		u16 len, u16 entry_size)
2914 {
2915 	struct be_dma_mem *mem = &q->dma_mem;
2916 
2917 	memset(q, 0, sizeof(*q));
2918 	q->len = len;
2919 	q->entry_size = entry_size;
2920 	mem->size = len * entry_size;
2921 	mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2922 	if (!mem->va)
2923 		return -ENOMEM;
2924 	memset(mem->va, 0, mem->size);
2925 	return 0;
2926 }
2927 
2928 static int
2929 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2930 			 struct hwi_context_memory *phwi_context,
2931 			 struct hwi_controller *phwi_ctrlr)
2932 {
2933 	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2934 	u64 pa_addr_lo;
2935 	unsigned int idx, num, i;
2936 	struct mem_array *pwrb_arr;
2937 	void *wrb_vaddr;
2938 	struct be_dma_mem sgl;
2939 	struct be_mem_descriptor *mem_descr;
2940 	int status;
2941 
2942 	idx = 0;
2943 	mem_descr = phba->init_mem;
2944 	mem_descr += HWI_MEM_WRB;
2945 	pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2946 			   GFP_KERNEL);
2947 	if (!pwrb_arr) {
2948 		shost_printk(KERN_ERR, phba->shost,
2949 			     "Memory alloc failed in create wrb ring.\n");
2950 		return -ENOMEM;
2951 	}
2952 	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2953 	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2954 	num_wrb_rings = mem_descr->mem_array[idx].size /
2955 		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2956 
2957 	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2958 		if (num_wrb_rings) {
2959 			pwrb_arr[num].virtual_address = wrb_vaddr;
2960 			pwrb_arr[num].bus_address.u.a64.address	= pa_addr_lo;
2961 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2962 					    sizeof(struct iscsi_wrb);
2963 			wrb_vaddr += pwrb_arr[num].size;
2964 			pa_addr_lo += pwrb_arr[num].size;
2965 			num_wrb_rings--;
2966 		} else {
2967 			idx++;
2968 			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2969 			pa_addr_lo = mem_descr->mem_array[idx].\
2970 					bus_address.u.a64.address;
2971 			num_wrb_rings = mem_descr->mem_array[idx].size /
2972 					(phba->params.wrbs_per_cxn *
2973 					sizeof(struct iscsi_wrb));
2974 			pwrb_arr[num].virtual_address = wrb_vaddr;
2975 			pwrb_arr[num].bus_address.u.a64.address\
2976 						= pa_addr_lo;
2977 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2978 						 sizeof(struct iscsi_wrb);
2979 			wrb_vaddr += pwrb_arr[num].size;
2980 			pa_addr_lo   += pwrb_arr[num].size;
2981 			num_wrb_rings--;
2982 		}
2983 	}
2984 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2985 		wrb_mem_index = 0;
2986 		offset = 0;
2987 		size = 0;
2988 
2989 		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2990 		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2991 					    &phwi_context->be_wrbq[i]);
2992 		if (status != 0) {
2993 			shost_printk(KERN_ERR, phba->shost,
2994 				     "wrbq create failed.");
2995 			kfree(pwrb_arr);
2996 			return status;
2997 		}
2998 		phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
2999 								   id;
3000 	}
3001 	kfree(pwrb_arr);
3002 	return 0;
3003 }
3004 
3005 static void free_wrb_handles(struct beiscsi_hba *phba)
3006 {
3007 	unsigned int index;
3008 	struct hwi_controller *phwi_ctrlr;
3009 	struct hwi_wrb_context *pwrb_context;
3010 
3011 	phwi_ctrlr = phba->phwi_ctrlr;
3012 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3013 		pwrb_context = &phwi_ctrlr->wrb_context[index];
3014 		kfree(pwrb_context->pwrb_handle_base);
3015 		kfree(pwrb_context->pwrb_handle_basestd);
3016 	}
3017 }
3018 
3019 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3020 {
3021 	struct be_queue_info *q;
3022 	struct be_ctrl_info *ctrl = &phba->ctrl;
3023 
3024 	q = &phba->ctrl.mcc_obj.q;
3025 	if (q->created)
3026 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3027 	be_queue_free(phba, q);
3028 
3029 	q = &phba->ctrl.mcc_obj.cq;
3030 	if (q->created)
3031 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3032 	be_queue_free(phba, q);
3033 }
3034 
/**
 * hwi_cleanup - destroy every adapter queue created during port init
 * @phba: adapter instance
 *
 * Destroys WRB queues and their handle arrays, both default PDU
 * queues, the posted SGL pages, the completion queues, the event
 * queues (including the extra MCC EQ when MSI-X is enabled) and
 * finally the MCC queues.  Queues are only destroyed if their
 * 'created' flag is set, so this is safe to call from partial-init
 * error paths.
 */
static void hwi_cleanup(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i, eq_num;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}
	free_wrb_handles(phba);

	q = &phwi_context->be_def_hdrq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	q = &phwi_context->be_def_dataq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	/* NULL queue: tells the firmware to unpost the SGL pages */
	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	for (i = 0; i < (phba->num_cpus); i++) {
		q = &phwi_context->be_cq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
	}
	/* with MSI-X there is one extra EQ, used for the MCC */
	if (phba->msix_enabled)
		eq_num = 1;
	else
		eq_num = 0;
	for (i = 0; i < (phba->num_cpus + eq_num); i++) {
		q = &phwi_context->be_eq[i].q;
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
	}
	be_mcc_queues_destroy(phba);
}
3078 
3079 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3080 				struct hwi_context_memory *phwi_context)
3081 {
3082 	struct be_queue_info *q, *cq;
3083 	struct be_ctrl_info *ctrl = &phba->ctrl;
3084 
3085 	/* Alloc MCC compl queue */
3086 	cq = &phba->ctrl.mcc_obj.cq;
3087 	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3088 			sizeof(struct be_mcc_compl)))
3089 		goto err;
3090 	/* Ask BE to create MCC compl queue; */
3091 	if (phba->msix_enabled) {
3092 		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3093 					 [phba->num_cpus].q, false, true, 0))
3094 		goto mcc_cq_free;
3095 	} else {
3096 		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3097 					  false, true, 0))
3098 		goto mcc_cq_free;
3099 	}
3100 
3101 	/* Alloc MCC queue */
3102 	q = &phba->ctrl.mcc_obj.q;
3103 	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3104 		goto mcc_cq_destroy;
3105 
3106 	/* Ask BE to create MCC queue */
3107 	if (beiscsi_cmd_mccq_create(phba, q, cq))
3108 		goto mcc_q_free;
3109 
3110 	return 0;
3111 
3112 mcc_q_free:
3113 	be_queue_free(phba, q);
3114 mcc_cq_destroy:
3115 	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3116 mcc_cq_free:
3117 	be_queue_free(phba, cq);
3118 err:
3119 	return -ENOMEM;
3120 }
3121 
3122 static int find_num_cpus(void)
3123 {
3124 	int  num_cpus = 0;
3125 
3126 	num_cpus = num_online_cpus();
3127 	if (num_cpus >= MAX_CPUS)
3128 		num_cpus = MAX_CPUS - 1;
3129 
3130 	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
3131 	return num_cpus;
3132 }
3133 
/**
 * hwi_init_port - bring up all adapter queues for the iSCSI port
 * @phba: adapter instance
 *
 * Initializes the firmware and creates, in order: event queues, MCC
 * queues, completion queues, the default PDU header and data rings,
 * the posted SGL pages and the per-connection WRB rings.  The order
 * matters: later queues are bound to earlier ones.  On any failure
 * every queue created so far is torn down via hwi_cleanup().
 *
 * Returns 0 on success or the failing step's error code.
 */
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status;

	def_pdu_ring_sz =
		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* EQ delay bounds; cur_eqd is the starting delay value */
	phwi_context->max_eqd = 0;
	phwi_context->min_eqd = 0;
	phwi_context->cur_eqd = 64;
	/* NOTE(review): return value ignored here — confirm intentional */
	be_cmd_fw_initialize(&phba->ctrl);

	status = beiscsi_create_eqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
		goto error;
	}

	status = be_mcc_queues_create(phba, phwi_context);
	if (status != 0)
		goto error;

	status = mgmt_check_supported_fw(ctrl, phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Unsupported fw version\n");
		goto error;
	}

	status = beiscsi_create_cqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
		goto error;
	}

	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
					def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Header not created\n");
		goto error;
	}

	status = beiscsi_create_def_data(phba, phwi_context,
					 phwi_ctrlr, def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Data not created\n");
		goto error;
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
		goto error;
	}

	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "WRB Rings not created\n");
		goto error;
	}

	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
	return 0;

error:
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
	hwi_cleanup(phba);
	return status;
}
3211 
3212 static int hwi_init_controller(struct beiscsi_hba *phba)
3213 {
3214 	struct hwi_controller *phwi_ctrlr;
3215 
3216 	phwi_ctrlr = phba->phwi_ctrlr;
3217 	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3218 		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3219 		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3220 		SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
3221 			 phwi_ctrlr->phwi_ctxt);
3222 	} else {
3223 		shost_printk(KERN_ERR, phba->shost,
3224 			     "HWI_MEM_ADDN_CONTEXT is more than one element."
3225 			     "Failing to load\n");
3226 		return -ENOMEM;
3227 	}
3228 
3229 	iscsi_init_global_templates(phba);
3230 	if (beiscsi_init_wrb_handle(phba))
3231 		return -ENOMEM;
3232 
3233 	hwi_init_async_pdu_ctx(phba);
3234 	if (hwi_init_port(phba) != 0) {
3235 		shost_printk(KERN_ERR, phba->shost,
3236 			     "hwi_init_controller failed\n");
3237 		return -ENOMEM;
3238 	}
3239 	return 0;
3240 }
3241 
3242 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3243 {
3244 	struct be_mem_descriptor *mem_descr;
3245 	int i, j;
3246 
3247 	mem_descr = phba->init_mem;
3248 	i = 0;
3249 	j = 0;
3250 	for (i = 0; i < SE_MEM_MAX; i++) {
3251 		for (j = mem_descr->num_elements; j > 0; j--) {
3252 			pci_free_consistent(phba->pcidev,
3253 			  mem_descr->mem_array[j - 1].size,
3254 			  mem_descr->mem_array[j - 1].virtual_address,
3255 			  (unsigned long)mem_descr->mem_array[j - 1].
3256 			  bus_address.u.a64.address);
3257 		}
3258 		kfree(mem_descr->mem_array);
3259 		mem_descr++;
3260 	}
3261 	kfree(phba->init_mem);
3262 	kfree(phba->phwi_ctrlr);
3263 }
3264 
3265 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3266 {
3267 	int ret = -ENOMEM;
3268 
3269 	ret = beiscsi_get_memory(phba);
3270 	if (ret < 0) {
3271 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
3272 			     "Failed in beiscsi_alloc_memory\n");
3273 		return ret;
3274 	}
3275 
3276 	ret = hwi_init_controller(phba);
3277 	if (ret)
3278 		goto free_init;
3279 	SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
3280 	return 0;
3281 
3282 free_init:
3283 	beiscsi_free_mem(phba);
3284 	return ret;
3285 }
3286 
/**
 * beiscsi_init_sgl_handle - build the I/O and eh SGL handle pools
 * @phba: adapter instance
 *
 * Carves the HWI_MEM_SGLH region into sgl_handle structures: the
 * first ios_per_ctrl handles go to the I/O pool, the remaining
 * (icds_per_ctrl - ios_per_ctrl) to the eh (mgmt) pool.  Then walks
 * the HWI_MEM_SGE region and attaches one SGE fragment array to each
 * handle, assigning firmware SGL indices starting at iscsi_icd_start.
 *
 * Returns 0 on success, -ENOMEM on allocation or layout failure.
 */
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;

	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	/* the handle region must be a single contiguous chunk */
	if (1 == mem_descr_sglh->num_elements) {
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						 phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}

	/* first ios_per_ctrl handles feed the I/O pool, the rest eh */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
		      sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
								psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
		 "phba->eh_sgl_hndl_avbl=%d\n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);
	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
		 mem_descr_sg->num_elements);
	/* attach num_sge_per_io SGE fragments to each handle, in order */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;

		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
						phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index =
				phba->fw_config.iscsi_icd_start + arr_index++;
		}
		idx++;
	}
	/* both pools start fully available */
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
3385 
3386 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3387 {
3388 	int i, new_cid;
3389 
3390 	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3391 				  GFP_KERNEL);
3392 	if (!phba->cid_array) {
3393 		shost_printk(KERN_ERR, phba->shost,
3394 			     "Failed to allocate memory in "
3395 			     "hba_setup_cid_tbls\n");
3396 		return -ENOMEM;
3397 	}
3398 	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3399 				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3400 	if (!phba->ep_array) {
3401 		shost_printk(KERN_ERR, phba->shost,
3402 			     "Failed to allocate memory in "
3403 			     "hba_setup_cid_tbls\n");
3404 		kfree(phba->cid_array);
3405 		return -ENOMEM;
3406 	}
3407 	new_cid = phba->fw_config.iscsi_cid_start;
3408 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3409 		phba->cid_array[i] = new_cid;
3410 		new_cid += 2;
3411 	}
3412 	phba->avlbl_cids = phba->params.cxns_per_ctrl;
3413 	return 0;
3414 }
3415 
3416 static void hwi_enable_intr(struct beiscsi_hba *phba)
3417 {
3418 	struct be_ctrl_info *ctrl = &phba->ctrl;
3419 	struct hwi_controller *phwi_ctrlr;
3420 	struct hwi_context_memory *phwi_context;
3421 	struct be_queue_info *eq;
3422 	u8 __iomem *addr;
3423 	u32 reg, i;
3424 	u32 enabled;
3425 
3426 	phwi_ctrlr = phba->phwi_ctrlr;
3427 	phwi_context = phwi_ctrlr->phwi_ctxt;
3428 
3429 	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3430 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3431 	reg = ioread32(addr);
3432 
3433 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3434 	if (!enabled) {
3435 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3436 		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
3437 		iowrite32(reg, addr);
3438 	}
3439 
3440 	if (!phba->msix_enabled) {
3441 		eq = &phwi_context->be_eq[0].q;
3442 		SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3443 		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3444 	} else {
3445 		for (i = 0; i <= phba->num_cpus; i++) {
3446 			eq = &phwi_context->be_eq[i].q;
3447 			SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3448 			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3449 		}
3450 	}
3451 }
3452 
3453 static void hwi_disable_intr(struct beiscsi_hba *phba)
3454 {
3455 	struct be_ctrl_info *ctrl = &phba->ctrl;
3456 
3457 	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3458 	u32 reg = ioread32(addr);
3459 
3460 	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3461 	if (enabled) {
3462 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3463 		iowrite32(reg, addr);
3464 	} else
3465 		shost_printk(KERN_WARNING, phba->shost,
3466 			     "In hwi_disable_intr, Already Disabled\n");
3467 }
3468 
3469 static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3470 {
3471 	struct be_cmd_get_boot_target_resp *boot_resp;
3472 	struct be_cmd_get_session_resp *session_resp;
3473 	struct be_mcc_wrb *wrb;
3474 	struct be_dma_mem nonemb_cmd;
3475 	unsigned int tag, wrb_num;
3476 	unsigned short status, extd_status;
3477 	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
3478 	int ret = -ENOMEM;
3479 
3480 	tag = mgmt_get_boot_target(phba);
3481 	if (!tag) {
3482 		SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed\n");
3483 		return -EAGAIN;
3484 	} else
3485 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3486 					 phba->ctrl.mcc_numtag[tag]);
3487 
3488 	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3489 	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3490 	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3491 	if (status || extd_status) {
3492 		SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_info Failed"
3493 				    " status = %d extd_status = %d\n",
3494 				    status, extd_status);
3495 		free_mcc_tag(&phba->ctrl, tag);
3496 		return -EBUSY;
3497 	}
3498 	wrb = queue_get_wrb(mccq, wrb_num);
3499 	free_mcc_tag(&phba->ctrl, tag);
3500 	boot_resp = embedded_payload(wrb);
3501 
3502 	if (boot_resp->boot_session_handle < 0) {
3503 		shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
3504 		return -ENXIO;
3505 	}
3506 
3507 	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3508 				sizeof(*session_resp),
3509 				&nonemb_cmd.dma);
3510 	if (nonemb_cmd.va == NULL) {
3511 		SE_DEBUG(DBG_LVL_1,
3512 			 "Failed to allocate memory for"
3513 			 "beiscsi_get_session_info\n");
3514 		return -ENOMEM;
3515 	}
3516 
3517 	memset(nonemb_cmd.va, 0, sizeof(*session_resp));
3518 	tag = mgmt_get_session_info(phba, boot_resp->boot_session_handle,
3519 				    &nonemb_cmd);
3520 	if (!tag) {
3521 		SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
3522 			" Failed\n");
3523 		goto boot_freemem;
3524 	} else
3525 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3526 					 phba->ctrl.mcc_numtag[tag]);
3527 
3528 	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3529 	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3530 	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3531 	if (status || extd_status) {
3532 		SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
3533 				    " status = %d extd_status = %d\n",
3534 				    status, extd_status);
3535 		free_mcc_tag(&phba->ctrl, tag);
3536 		goto boot_freemem;
3537 	}
3538 	wrb = queue_get_wrb(mccq, wrb_num);
3539 	free_mcc_tag(&phba->ctrl, tag);
3540 	session_resp = nonemb_cmd.va ;
3541 
3542 	memcpy(&phba->boot_sess, &session_resp->session_info,
3543 	       sizeof(struct mgmt_session_info));
3544 	ret = 0;
3545 
3546 boot_freemem:
3547 	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3548 		    nonemb_cmd.va, nonemb_cmd.dma);
3549 	return ret;
3550 }
3551 
/*
 * Release callback for the iscsi boot kobjects: drops the host
 * reference taken in beiscsi_setup_boot_info().
 */
static void beiscsi_boot_release(void *data)
{
	struct beiscsi_hba *phba = data;

	scsi_host_put(phba->shost);
}
3558 
/**
 * beiscsi_setup_boot_info - export iSCSI boot info through sysfs
 * @phba: adapter instance
 *
 * Fetches the boot session from firmware, then creates the boot kset
 * with target, initiator and ethernet kobjects.  Each kobject holds
 * its own scsi_host reference, released via beiscsi_boot_release()
 * when the kobject goes away, so iscsi_boot_destroy_kset() in the
 * error path also drops the references of kobjects already created.
 *
 * Returns 0 on success (including "no boot info available"), -ENOMEM
 * on any allocation failure.
 */
static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
{
	struct iscsi_boot_kobj *boot_kobj;

	/* get boot info using mgmt cmd */
	if (beiscsi_get_boot_info(phba))
		/* Try to see if we can carry on without this */
		return 0;

	phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
	if (!phba->boot_kset)
		return -ENOMEM;

	/* get a ref because the show function will ref the phba */
	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
					     beiscsi_show_boot_tgt_info,
					     beiscsi_tgt_get_attr_visibility,
					     beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	/* one additional host ref per kobject created */
	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
						beiscsi_show_boot_ini_info,
						beiscsi_ini_get_attr_visibility,
						beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
					       beiscsi_show_boot_eth_info,
					       beiscsi_eth_get_attr_visibility,
					       beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;
	return 0;

put_shost:
	/* drop the ref taken for the kobject that failed to be created */
	scsi_host_put(phba->shost);
free_kset:
	iscsi_boot_destroy_kset(phba->boot_kset);
	return -ENOMEM;
}
3607 
3608 static int beiscsi_init_port(struct beiscsi_hba *phba)
3609 {
3610 	int ret;
3611 
3612 	ret = beiscsi_init_controller(phba);
3613 	if (ret < 0) {
3614 		shost_printk(KERN_ERR, phba->shost,
3615 			     "beiscsi_dev_probe - Failed in"
3616 			     "beiscsi_init_controller\n");
3617 		return ret;
3618 	}
3619 	ret = beiscsi_init_sgl_handle(phba);
3620 	if (ret < 0) {
3621 		shost_printk(KERN_ERR, phba->shost,
3622 			     "beiscsi_dev_probe - Failed in"
3623 			     "beiscsi_init_sgl_handle\n");
3624 		goto do_cleanup_ctrlr;
3625 	}
3626 
3627 	if (hba_setup_cid_tbls(phba)) {
3628 		shost_printk(KERN_ERR, phba->shost,
3629 			     "Failed in hba_setup_cid_tbls\n");
3630 		kfree(phba->io_sgl_hndl_base);
3631 		kfree(phba->eh_sgl_hndl_base);
3632 		goto do_cleanup_ctrlr;
3633 	}
3634 
3635 	return ret;
3636 
3637 do_cleanup_ctrlr:
3638 	hwi_cleanup(phba);
3639 	return ret;
3640 }
3641 
/**
 * hwi_purge_eq - drain all pending entries from every event queue
 * @phba: adapter instance
 *
 * Walks each EQ (including the extra MCC EQ under MSI-X), consumes
 * every valid entry by clearing its valid bit and advancing the tail,
 * then rings the EQ doorbell to acknowledge what was consumed.
 */
static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* with MSI-X there is one extra EQ dedicated to the MCC */
	if (phba->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		/* consume entries until the next one is no longer valid */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
	}
}
3674 
3675 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3676 {
3677 	int mgmt_status;
3678 
3679 	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3680 	if (mgmt_status)
3681 		shost_printk(KERN_WARNING, phba->shost,
3682 			     "mgmt_epfw_cleanup FAILED\n");
3683 
3684 	hwi_purge_eq(phba);
3685 	hwi_cleanup(phba);
3686 	kfree(phba->io_sgl_hndl_base);
3687 	kfree(phba->eh_sgl_hndl_base);
3688 	kfree(phba->cid_array);
3689 	kfree(phba->ep_array);
3690 }
3691 
/**
 * beiscsi_cleanup_task - release per-task driver resources
 * @task: libiscsi task being cleaned up
 *
 * Frees the task's command BHS, its WRB handle and its SGL handle.
 * SCSI tasks use the io_sgl pool; non-SCSI (mgmt/login) tasks use the
 * mgmt_sgl pool, and their handles are kept while a login is still in
 * progress.  Presumably called with the session lock held (see
 * beiscsi_offload_connection) — confirm against other callers.
 */
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	/* wrb_context array is indexed by cid relative to the fw base */
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
			- phba->fw_config.iscsi_cid_start];

	if (io_task->cmd_bhs) {
		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
		io_task->cmd_bhs = NULL;
	}

	if (task->sc) {
		/* SCSI command task: handles come from the io_sgl pool */
		if (io_task->pwrb_handle) {
			free_wrb_handle(phba, pwrb_context,
					io_task->pwrb_handle);
			io_task->pwrb_handle = NULL;
		}

		if (io_task->psgl_handle) {
			spin_lock(&phba->io_sgl_lock);
			free_io_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->io_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	} else {
		/*
		 * Mgmt/login task: keep the handles while a login is in
		 * progress; otherwise release them to the mgmt_sgl pool.
		 */
		if (!beiscsi_conn->login_in_progress) {
			if (io_task->pwrb_handle) {
				free_wrb_handle(phba, pwrb_context,
						io_task->pwrb_handle);
				io_task->pwrb_handle = NULL;
			}
			if (io_task->psgl_handle) {
				spin_lock(&phba->mgmt_sgl_lock);
				free_mgmt_sgl_handle(phba,
						     io_task->psgl_handle);
				spin_unlock(&phba->mgmt_sgl_lock);
				io_task->psgl_handle = NULL;
			}
		}
	}
}
3742 
/**
 * beiscsi_offload_connection - push a logged-in connection to hardware
 * @beiscsi_conn: connection to offload
 * @params: negotiated iSCSI parameters from login
 *
 * Releases the login task's resources, then builds a target context
 * update WRB carrying the negotiated parameters (burst lengths, ERL,
 * digest and R2T flags, exp_statsn) and rings the TX doorbell so the
 * adapter takes over PDU processing for this connection.
 */
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_task *task = beiscsi_conn->task;
	struct iscsi_session *session = task->conn->session;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	beiscsi_conn->login_in_progress = 0;
	spin_lock_bh(&session->lock);
	beiscsi_cleanup_task(task);
	spin_unlock_bh(&session->lock);

	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
				       phba->fw_config.iscsi_cid_start));
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	/* copy the negotiated login parameters into the context WRB */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		       imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	/* WRB type 0x7 marks a target context update */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	/* point the context at the shared pad buffer */
	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	/* hand the WRB to the adapter via the TX doorbell */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
3835 
3836 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3837 			      int *index, int *age)
3838 {
3839 	*index = (int)itt;
3840 	if (age)
3841 		*age = conn->session->age;
3842 }
3843 
3844 /**
3845  * beiscsi_alloc_pdu - allocates pdu and related resources
3846  * @task: libiscsi task
3847  * @opcode: opcode of pdu for task
3848  *
3849  * This is called with the session lock held. It will allocate
3850  * the wrb and sgl if needed for the command. And it will prep
3851  * the pdu's itt. beiscsi_parse_pdu will later translate
3852  * the pdu itt to the libiscsi task itt.
3853  */
3854 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3855 {
3856 	struct beiscsi_io_task *io_task = task->dd_data;
3857 	struct iscsi_conn *conn = task->conn;
3858 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3859 	struct beiscsi_hba *phba = beiscsi_conn->phba;
3860 	struct hwi_wrb_context *pwrb_context;
3861 	struct hwi_controller *phwi_ctrlr;
3862 	itt_t itt;
3863 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3864 	dma_addr_t paddr;
3865 
3866 	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3867 					  GFP_ATOMIC, &paddr);
3868 	if (!io_task->cmd_bhs)
3869 		return -ENOMEM;
3870 	io_task->bhs_pa.u.a64.address = paddr;
3871 	io_task->libiscsi_itt = (itt_t)task->itt;
3872 	io_task->conn = beiscsi_conn;
3873 
3874 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3875 	task->hdr_max = sizeof(struct be_cmd_bhs);
3876 	io_task->psgl_handle = NULL;
3877 	io_task->pwrb_handle = NULL;
3878 
3879 	if (task->sc) {
3880 		spin_lock(&phba->io_sgl_lock);
3881 		io_task->psgl_handle = alloc_io_sgl_handle(phba);
3882 		spin_unlock(&phba->io_sgl_lock);
3883 		if (!io_task->psgl_handle)
3884 			goto free_hndls;
3885 		io_task->pwrb_handle = alloc_wrb_handle(phba,
3886 					beiscsi_conn->beiscsi_conn_cid -
3887 					phba->fw_config.iscsi_cid_start);
3888 		if (!io_task->pwrb_handle)
3889 			goto free_io_hndls;
3890 	} else {
3891 		io_task->scsi_cmnd = NULL;
3892 		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3893 			if (!beiscsi_conn->login_in_progress) {
3894 				spin_lock(&phba->mgmt_sgl_lock);
3895 				io_task->psgl_handle = (struct sgl_handle *)
3896 						alloc_mgmt_sgl_handle(phba);
3897 				spin_unlock(&phba->mgmt_sgl_lock);
3898 				if (!io_task->psgl_handle)
3899 					goto free_hndls;
3900 
3901 				beiscsi_conn->login_in_progress = 1;
3902 				beiscsi_conn->plogin_sgl_handle =
3903 							io_task->psgl_handle;
3904 				io_task->pwrb_handle =
3905 					alloc_wrb_handle(phba,
3906 					beiscsi_conn->beiscsi_conn_cid -
3907 					phba->fw_config.iscsi_cid_start);
3908 				if (!io_task->pwrb_handle)
3909 					goto free_io_hndls;
3910 				beiscsi_conn->plogin_wrb_handle =
3911 							io_task->pwrb_handle;
3912 
3913 			} else {
3914 				io_task->psgl_handle =
3915 						beiscsi_conn->plogin_sgl_handle;
3916 				io_task->pwrb_handle =
3917 						beiscsi_conn->plogin_wrb_handle;
3918 			}
3919 			beiscsi_conn->task = task;
3920 		} else {
3921 			spin_lock(&phba->mgmt_sgl_lock);
3922 			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3923 			spin_unlock(&phba->mgmt_sgl_lock);
3924 			if (!io_task->psgl_handle)
3925 				goto free_hndls;
3926 			io_task->pwrb_handle =
3927 					alloc_wrb_handle(phba,
3928 					beiscsi_conn->beiscsi_conn_cid -
3929 					phba->fw_config.iscsi_cid_start);
3930 			if (!io_task->pwrb_handle)
3931 				goto free_mgmt_hndls;
3932 
3933 		}
3934 	}
3935 	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3936 				 wrb_index << 16) | (unsigned int)
3937 				(io_task->psgl_handle->sgl_index));
3938 	io_task->pwrb_handle->pio_handle = task;
3939 
3940 	io_task->cmd_bhs->iscsi_hdr.itt = itt;
3941 	return 0;
3942 
3943 free_io_hndls:
3944 	spin_lock(&phba->io_sgl_lock);
3945 	free_io_sgl_handle(phba, io_task->psgl_handle);
3946 	spin_unlock(&phba->io_sgl_lock);
3947 	goto free_hndls;
3948 free_mgmt_hndls:
3949 	spin_lock(&phba->mgmt_sgl_lock);
3950 	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3951 	spin_unlock(&phba->mgmt_sgl_lock);
3952 free_hndls:
3953 	phwi_ctrlr = phba->phwi_ctrlr;
3954 	pwrb_context = &phwi_ctrlr->wrb_context[
3955 			beiscsi_conn->beiscsi_conn_cid -
3956 			phba->fw_config.iscsi_cid_start];
3957 	if (io_task->pwrb_handle)
3958 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3959 	io_task->pwrb_handle = NULL;
3960 	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3961 		      io_task->bhs_pa.u.a64.address);
3962 	io_task->cmd_bhs = NULL;
3963 	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3964 	return -ENOMEM;
3965 }
3966 
/**
 * beiscsi_iotask - post a SCSI data I/O work request to the adapter
 * @task: libiscsi task carrying the SCSI command
 * @sg: DMA-mapped scatterlist for the data buffer
 * @num_sg: number of entries in @sg
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (DMA_TO_DEVICE), zero for a read
 *
 * Builds the WRB (work request block) for the command, writes the SGL,
 * converts the WRB to little-endian and rings the TXULP0 doorbell to
 * hand it to the firmware. Always returns 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		/* Pre-build a Data-Out header template for solicited/
		 * unsolicited data; 48 is the BHS size being cleared.
		 */
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	/* Copy the LUN from the command BHS into the Data-Out template. */
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
				  &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	/* Chain to the next WRB in the ring before byte-swapping. */
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Doorbell: connection id, WRB index, and a posted count of 1. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
4030 
/**
 * beiscsi_mtask - post a management (non-data) pdu to the adapter
 * @task: libiscsi task carrying the management pdu
 *
 * Builds a WRB for login, nop-out, text, tmf or logout pdus and rings
 * the TXULP0 doorbell. Returns 0 on success or -EINVAL for an opcode
 * the hardware path does not support.
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		/* Login always goes out with cmdsn_itt forced to 1. */
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		/* A non-reserved ttt means this is a reply to a target
		 * nop-in; a reserved ttt is an initiator-originated ping.
		 */
		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      TGT_DM_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
				      pwrb, 0);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_RD_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		}
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;

	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      task->data_count);
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Doorbell: connection id, WRB index, and a posted count of 1. */
	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
4111 
4112 static int beiscsi_task_xmit(struct iscsi_task *task)
4113 {
4114 	struct beiscsi_io_task *io_task = task->dd_data;
4115 	struct scsi_cmnd *sc = task->sc;
4116 	struct scatterlist *sg;
4117 	int num_sg;
4118 	unsigned int  writedir = 0, xferlen = 0;
4119 
4120 	if (!sc)
4121 		return beiscsi_mtask(task);
4122 
4123 	io_task->scsi_cmnd = sc;
4124 	num_sg = scsi_dma_map(sc);
4125 	if (num_sg < 0) {
4126 		SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
4127 		return num_sg;
4128 	}
4129 	xferlen = scsi_bufflen(sc);
4130 	sg = scsi_sglist(sc);
4131 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
4132 		writedir = 1;
4133 		SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
4134 			 task->imm_count);
4135 	} else
4136 		writedir = 0;
4137 	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4138 }
4139 
4140 /**
4141  * beiscsi_bsg_request - handle bsg request from ISCSI transport
4142  * @job: job to handle
4143  */
4144 static int beiscsi_bsg_request(struct bsg_job *job)
4145 {
4146 	struct Scsi_Host *shost;
4147 	struct beiscsi_hba *phba;
4148 	struct iscsi_bsg_request *bsg_req = job->request;
4149 	int rc = -EINVAL;
4150 	unsigned int tag;
4151 	struct be_dma_mem nonemb_cmd;
4152 	struct be_cmd_resp_hdr *resp;
4153 	struct iscsi_bsg_reply *bsg_reply = job->reply;
4154 	unsigned short status, extd_status;
4155 
4156 	shost = iscsi_job_to_shost(job);
4157 	phba = iscsi_host_priv(shost);
4158 
4159 	switch (bsg_req->msgcode) {
4160 	case ISCSI_BSG_HST_VENDOR:
4161 		nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4162 					job->request_payload.payload_len,
4163 					&nonemb_cmd.dma);
4164 		if (nonemb_cmd.va == NULL) {
4165 			SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for "
4166 				 "beiscsi_bsg_request\n");
4167 			return -EIO;
4168 		}
4169 		tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4170 						  &nonemb_cmd);
4171 		if (!tag) {
4172 			SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
4173 			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4174 					    nonemb_cmd.va, nonemb_cmd.dma);
4175 			return -EAGAIN;
4176 		} else
4177 			wait_event_interruptible(phba->ctrl.mcc_wait[tag],
4178 						 phba->ctrl.mcc_numtag[tag]);
4179 		extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
4180 		status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
4181 		free_mcc_tag(&phba->ctrl, tag);
4182 		resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4183 		sg_copy_from_buffer(job->reply_payload.sg_list,
4184 				    job->reply_payload.sg_cnt,
4185 				    nonemb_cmd.va, (resp->response_length
4186 				    + sizeof(*resp)));
4187 		bsg_reply->reply_payload_rcv_len = resp->response_length;
4188 		bsg_reply->result = status;
4189 		bsg_job_done(job, bsg_reply->result,
4190 			     bsg_reply->reply_payload_rcv_len);
4191 		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4192 				    nonemb_cmd.va, nonemb_cmd.dma);
4193 		if (status || extd_status) {
4194 			SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
4195 				 " status = %d extd_status = %d\n",
4196 				 status, extd_status);
4197 			return -EIO;
4198 		}
4199 		break;
4200 
4201 	default:
4202 		SE_DEBUG(DBG_LVL_1, "Unsupported bsg command: 0x%x\n",
4203 			 bsg_req->msgcode);
4204 		break;
4205 	}
4206 
4207 	return rc;
4208 }
4209 
/**
 * beiscsi_quiesce - stop all adapter activity and release resources
 * @phba: adapter instance being torn down
 *
 * Disables interrupts, frees the (MSI-X or legacy) IRQs, stops the
 * workqueue and iopoll instances, cleans up the port, clears the
 * crashdump semaphore bit and unmaps/frees PCI resources. Shared by
 * the remove and shutdown paths.
 */
static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;
	u8 *real_offset = 0;
	u32 value = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		/* num_cpus + 1 vectors were requested (one per CPU plus
		 * one extra), hence the inclusive bound.
		 */
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
			kfree(phba->msi_name[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	/* Clear the in-use bit in the endpoint semaphore register so a
	 * reload does not come up in crashdump mode.
	 */
	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	value = readl((void *)real_offset);

	if (value & 0x00010000) {
		value &= 0xfffeffff;
		writel(value, (void *)real_offset);
	}
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
}
4255 
4256 static void beiscsi_remove(struct pci_dev *pcidev)
4257 {
4258 
4259 	struct beiscsi_hba *phba = NULL;
4260 
4261 	phba = pci_get_drvdata(pcidev);
4262 	if (!phba) {
4263 		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4264 		return;
4265 	}
4266 
4267 	beiscsi_destroy_def_ifaces(phba);
4268 	beiscsi_quiesce(phba);
4269 	iscsi_boot_destroy_kset(phba->boot_kset);
4270 	iscsi_host_remove(phba->shost);
4271 	pci_dev_put(phba->pcidev);
4272 	iscsi_host_free(phba->shost);
4273 	pci_disable_device(pcidev);
4274 }
4275 
4276 static void beiscsi_shutdown(struct pci_dev *pcidev)
4277 {
4278 
4279 	struct beiscsi_hba *phba = NULL;
4280 
4281 	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4282 	if (!phba) {
4283 		dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4284 		return;
4285 	}
4286 
4287 	beiscsi_quiesce(phba);
4288 	pci_disable_device(pcidev);
4289 }
4290 
4291 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4292 {
4293 	int i, status;
4294 
4295 	for (i = 0; i <= phba->num_cpus; i++)
4296 		phba->msix_entries[i].entry = i;
4297 
4298 	status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4299 				 (phba->num_cpus + 1));
4300 	if (!status)
4301 		phba->msix_enabled = true;
4302 
4303 	return;
4304 }
4305 
/**
 * beiscsi_dev_probe - PCI probe: bring up one be2iscsi adapter
 * @pcidev: PCI device that matched the id table
 * @id: matching entry from beiscsi_pci_id_table
 *
 * Enables the PCI device, allocates the host structure, initializes
 * the controller (handling crashdump-mode recovery via the endpoint
 * semaphore register), sets up the port, MCC tags, workqueue, iopoll
 * and IRQs, then enables interrupts and the default ifaces. On any
 * failure the goto ladder unwinds exactly what was set up so far.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
				const struct pci_device_id *id)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	int ret, num_cpus, i;
	u8 *real_offset = 0;
	u32 value = 0;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			" Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			" Failed in beiscsi_hba_alloc\n");
		goto disable_pci;
	}

	/* Map the PCI device id to the ASIC generation. */
	switch (pcidev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
	case OC_DEVICE_ID2:
		phba->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID3:
		phba->generation = BE_GEN3;
		break;
	default:
		phba->generation = 0;
	}

	if (enable_msix)
		num_cpus = find_num_cpus();
	else
		num_cpus = 1;
	phba->num_cpus = num_cpus;
	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);

	if (enable_msix) {
		beiscsi_msix_enable(phba);
		/* Fall back to a single CPU if MSI-X could not be enabled. */
		if (!phba->msix_enabled)
			phba->num_cpus = 1;
	}
	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
				"Failed in be_ctrl_init\n");
		goto hba_free;
	}

	/* First HBA: inspect the endpoint semaphore. A set in-use bit
	 * means a previous instance crashed without cleanup, so reset
	 * the function (crashdump mode); otherwise claim the bit.
	 */
	if (!num_hba) {
		real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
		value = readl((void *)real_offset);
		if (value & 0x00010000) {
			gcrashmode++;
			shost_printk(KERN_ERR, phba->shost,
				"Loading Driver in crashdump mode\n");
			ret = beiscsi_cmd_reset_function(phba);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					"Reset Failed. Aborting Crashdump\n");
				goto hba_free;
			}
			ret = be_chk_reset_complete(phba);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					"Failed to get out of reset."
					"Aborting Crashdump\n");
				goto hba_free;
			}
		} else {
			value |= 0x00010000;
			writel(value, (void *)real_offset);
			num_hba++;
		}
	}

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->isr_lock);
	ret = mgmt_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Error getting fw config\n");
		goto free_port;
	}
	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
	beiscsi_get_params(phba);
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed in beiscsi_init_port\n");
		goto free_port;
	}

	/* MCC tags are 1-based; slot 0 of mcc_wait/mcc_numtag is unused. */
	for (i = 0; i < MAX_MCC_CMD ; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_numtag[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
	if (!phba->wq) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
				"Failed to allocate work queue\n");
		goto free_twq;
	}

	INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (blk_iopoll_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
					be_iopoll);
			blk_iopoll_enable(&pbe_eq->iopoll);
		}
	}
	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed to beiscsi_init_irqs\n");
		goto free_blkenbld;
	}
	hwi_enable_intr(phba);

	if (beiscsi_setup_boot_info(phba))
		/*
		 * log error but continue, because we may not be using
		 * iscsi boot.
		 */
		shost_printk(KERN_ERR, phba->shost, "Could not set up "
			     "iSCSI boot info.\n");

	beiscsi_create_def_ifaces(phba);
	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_blkenbld:
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}
free_twq:
	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
free_port:
	/* Release the endpoint semaphore bit claimed above. */
	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	value = readl((void *)real_offset);

	if (value & 0x00010000) {
		value &= 0xfffeffff;
		writel(value, (void *)real_offset);
	}

	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			   phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
hba_free:
	if (phba->msix_enabled)
		pci_disable_msix(phba->pcidev);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
disable_pci:
	pci_disable_device(pcidev);
	return ret;
}
4495 
/* iSCSI transport template registered with libiscsi: wires the
 * driver's session/connection/task callbacks into the transport core.
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = be2iscsi_attr_is_visible,
	.set_iface_param = be2iscsi_iface_set_param,
	.get_iface_param = be2iscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};
4528 
/* PCI driver descriptor: probe/remove/shutdown entry points for the
 * device ids listed in beiscsi_pci_id_table.
 */
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.shutdown = beiscsi_shutdown,
	.id_table = beiscsi_pci_id_table
};
4536 
4537 
4538 static int __init beiscsi_module_init(void)
4539 {
4540 	int ret;
4541 
4542 	beiscsi_scsi_transport =
4543 			iscsi_register_transport(&beiscsi_iscsi_transport);
4544 	if (!beiscsi_scsi_transport) {
4545 		SE_DEBUG(DBG_LVL_1,
4546 			 "beiscsi_module_init - Unable to  register beiscsi"
4547 			 "transport.\n");
4548 		return -ENOMEM;
4549 	}
4550 	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
4551 		 &beiscsi_iscsi_transport);
4552 
4553 	ret = pci_register_driver(&beiscsi_pci_driver);
4554 	if (ret) {
4555 		SE_DEBUG(DBG_LVL_1,
4556 			 "beiscsi_module_init - Unable to  register"
4557 			 "beiscsi pci driver.\n");
4558 		goto unregister_iscsi_transport;
4559 	}
4560 	return 0;
4561 
4562 unregister_iscsi_transport:
4563 	iscsi_unregister_transport(&beiscsi_iscsi_transport);
4564 	return ret;
4565 }
4566 
/**
 * beiscsi_module_exit - module exit point
 *
 * Unregisters in reverse order of beiscsi_module_init(): PCI driver
 * first (tearing down any bound adapters), then the iSCSI transport.
 */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);
4575