/**
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
static unsigned int gcrashmode = 0;
static unsigned int num_hba = 0;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically "
		 "contiguous memory that can be allocated. "
		 "Range is 16 - 128");

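/**
 * beiscsi_slave_configure - per-LUN setup hook
 * @sdev: The scsi_device being configured
 *
 * Caps each DMA segment on the device's request queue at 64 KB
 * (65536 bytes).
 */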
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

/**
 * beiscsi_eh_abort - abort a single timed-out command
 * @sc: Command to be aborted
 *
 * Asks the firmware to invalidate the ICD backing @sc, waits for the
 * MCC completion, then lets libiscsi's iscsi_eh_abort() finish the job.
 */
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for "
			 "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be"
			     " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}

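/**
 * beiscsi_eh_device_reset - LUN reset error-handler entry point
 * @sc: Command that timed out on the LUN being reset
 *
 * Collects every active task on the session that targets the same LUN
 * into the invalidation table, asks the firmware to invalidate them,
 * then hands off to libiscsi's iscsi_eh_device_reset().
 */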
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;
	int rc = FAILED;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
		goto unlock;

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		/* only invalidate tasks on the LUN being reset */
		if (abrt_task->sc->device->lun != sc->device->lun)
			continue;

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->lock);
	inv_tbl = phba->inv_tbl;

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for "
			 "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be"
			     " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	} else {
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
unlock:
	spin_unlock_bh(&session->lock);
	return rc;
}

static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			     (int)strlen(boot_sess->target_name),
			     (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		if (boot_conn->dest_ipaddr.ip_type == 0x1)
			rc = sprintf(buf, "%pI4\n",
				     (char *)&boot_conn->dest_ipaddr.ip_address);
		else
			rc = sprintf(str, "%pI6\n",
				     (char *)&boot_conn->dest_ipaddr.ip_address);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;

	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(buf, phba);
		if (rc < 0) {
			SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
			return rc;
		}
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

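/**
 * beiscsi_hba_alloc - allocate and register a Scsi_Host for the adapter
 * @pcidev: PCI device backing this host
 *
 * Allocates the iSCSI host together with the driver-private
 * beiscsi_hba area, fills in the host limits, and registers the host
 * with the midlayer.  Returns the new beiscsi_hba, or NULL on failure.
 */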
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
			"iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;

	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	/* BAR 2: control/status registers */
	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	/* BAR 4: doorbells */
	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	/* the PCI-config BAR differs between adapter generations */
	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - "
			"enable device failed\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}

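/**
 * be_ctrl_init - set up the adapter control structure
 * @phba: The hba to initialize
 * @pdev: The backing PCI device
 *
 * Maps the PCI BARs, allocates the 16-byte-aligned mailbox used for
 * bootstrap (MBOX) commands, and initializes the mailbox/MCC locks.
 */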
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		status = -ENOMEM;
		return status;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}

static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
				     - (phba->fw_config.iscsi_cid_count
				     + BE2_TMFS
				     + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	/* round the EQ size up to a multiple of 512, minimum 1024 */
	phba->params.num_eq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	       + BE2_TMFS) / 512) + 1) * 512;
	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
				      ? 1024 : phba->params.num_eq_entries;
	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
		 phba->params.num_eq_entries);
	phba->params.num_cq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
	       + BE2_TMFS) / 512) + 1) * 512;
	phba->params.wrbs_per_cxn = 256;
}

/* ring the EQ doorbell: ack num_processed entries, optionally rearm */
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	val |= id & DB_EQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - ISR for the MCC (management) event queue
 * @irq: Not used
 * @dev_id: Pointer to the EQ object for this vector
 *
 * Walks the event queue, flags the MCC completion queue for the
 * workqueue to process, and rings the EQ doorbell.
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}

/**
 * be_isr_msix - per-vector ISR for I/O event queues in MSI-X mode
 * @irq: Not used
 * @dev_id: Pointer to the EQ object for this vector
 *
 * With blk-iopoll enabled, completions are handed to the iopoll
 * context; otherwise the CQ is flagged for the workqueue.
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}

/**
 * be_isr - legacy INTx/MSI ISR of the driver
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 *
 * Shared handler used when MSI-X is not enabled; demultiplexes MCC
 * and I/O events from EQ 0.
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
			} else {
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
		}
		if (num_ioeq_processed || num_mcceq_processed) {
			if (phba->todo_mcc_cq)
				queue_work(phba->wq, &phba->work_cqs);

			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);

			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {

			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}

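/**
 * beiscsi_init_irqs - register interrupt handlers
 * @phba: The hba for which interrupts are being set up
 *
 * In MSI-X mode one vector is requested per I/O EQ plus one for the
 * MCC EQ; otherwise a single shared legacy IRQ is used.  On failure,
 * any vectors already requested are freed again.
 */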
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
						    GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					     "beiscsi_init_irqs - failed to"
					     " register msix for i = %d\n", i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		/* the last vector services the MCC event queue */
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost,
				     "beiscsi_init_irqs - failed to"
				     " register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}
	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost,
				     "beiscsi_init_irqs - "
				     "failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
	}
	return ret;
}

/* ring the CQ doorbell: ack num_processed entries, optionally rearm */
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	val |= id & DB_CQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
		PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pbuffer);
		WARN_ON(buf_len != 48);
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg\n",
			     (ppdu->
			      dw[offsetof(struct amap_pdu_base, opcode) / 32]
			      & PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}

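/**
 * alloc_io_sgl_handle - pop a free SGL handle for an I/O task
 * @phba: The hba owning the pool
 *
 * Returns the next free handle from the I/O SGL ring, or NULL when
 * the pool is exhausted.  Callers must serialize access to the pool.
 */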
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->io_sgl_hndl_avbl) {
		SE_DEBUG(DBG_LVL_8,
			 "In alloc_io_sgl_handle, io_sgl_alloc_index=%d\n",
			 phba->io_sgl_alloc_index);
		psgl_handle = phba->io_sgl_hndl_base[phba->
						     io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
		 phba->io_sgl_free_index);
	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double free in IO SGL, io_sgl_free_index=%d, "
			 "value there=%p\n", phba->io_sgl_free_index,
			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	/* need two free handles: one to hand out and one to peek at
	 * for the next-index link below */
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		if (pwrb_context->alloc_index ==
		    (phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	SE_DEBUG(DBG_LVL_8,
		 "FREE WRB: pwrb_handle=%p free_index=0x%x "
		 "wrb_handles_available=%d\n",
		 pwrb_handle, pwrb_context->free_index,
		 pwrb_context->wrb_handles_available);
}

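/**
 * alloc_mgmt_sgl_handle - pop a free SGL handle for a mgmt/eh task
 * @phba: The hba owning the pool
 *
 * Management and error-handling tasks draw from the tail of the ICD
 * range (icds_per_ctrl - ios_per_ctrl entries), a pool kept separate
 * from the I/O SGL handles.
 */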
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
		 phba->eh_sgl_free_index);
	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double free in eh SGL, eh_sgl_free_index=%d\n",
			 phba->eh_sgl_free_index);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}

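/**
 * be_complete_io - complete a SCSI command from a solicited CQE
 * @beiscsi_conn: Connection the command belongs to
 * @task: The iscsi_task being completed
 * @psol: The solicited completion entry from hardware
 *
 * Extracts response, status, residual and sense data from the CQE,
 * fills in task->sc and hands the task back to libiscsi.
 */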
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
	    (struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = (psol->
		     dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
		     & SOL_EXP_CMD_SN_MASK);
	max_cmdsn = ((psol->
		      dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
		      & SOL_EXP_CMD_SN_MASK) +
		     ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
		& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
		   & SOL_STS_MASK) >> 8);
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
		  & SOL_FLAGS_MASK) >> 24) | 0x80;
	if (!task->sc) {
		if (io_task->scsi_cmnd)
			scsi_dma_unmap(io_task->scsi_cmnd);

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
			 32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
		    & SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
		       & SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
			 32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
		       & SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
			 32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
			 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

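/**
 * hwi_complete_drvr_msgs - complete a driver-generated (mgmt) WRB
 * @beiscsi_conn: Connection the message was sent on
 * @phba: The hba that raised the completion
 * @psol: The driver-message completion entry
 *
 * Looks up the WRB handle from the CQE's cid/wrb_index and returns
 * both the mgmt SGL handle and the WRB handle to their pools.
 */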
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
		       & SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
			 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			 i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			 ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
				(struct amap_sol_cqe, cid) / 32]
				& SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
		WRB_TYPE_MASK) >> 28;

	spin_lock_bh(&session->lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, psol);
		else
			be_complete_io(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, psol);
		else
			be_complete_tmf(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGIN:
		SE_DEBUG(DBG_LVL_1,
			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
			 " - Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, psol);
		break;

	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "In hwi_complete_cmd, unknown type = %d "
			     "wrb_index 0x%x CID 0x%x\n", type,
			     ((psol->dw[offsetof(struct amap_sol_cqe,
			     wrb_index) / 32] & SOL_WRB_INDEX_MASK) >> 16),
			     ((psol->dw[offsetof(struct amap_sol_cqe,
			     cid) / 32] & SOL_CID_MASK) >> 6));
		break;
	}

	spin_unlock_bh(&session->lock);
}

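/*
 * Unsolicited (default PDU) handling: headers and data arrive on two
 * default PDU rings.  The per-CRI busy lists and wait queues below
 * track posted buffers until a complete PDU can be forwarded to
 * libiscsi, after which the buffers are reposted to the hardware.
 */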
static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
					  *pasync_ctx, unsigned int is_header,
					  unsigned int host_write_ptr)
{
	if (is_header)
		return &pasync_ctx->async_entry[host_write_ptr].
		    header_busy_list;
	else
		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}

static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	/* recover the buffer's bus address from the CQE */
	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
	      & PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
		& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));

		buffer_len = (unsigned int)(phys_addr.u.a64.address -
			pasync_ctx->async_header.pa_base.u.a64.address);

		buffer_index = buffer_len /
			pasync_ctx->async_header.buffer_size;
		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
			pasync_ctx->async_data.pa_base.u.a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			     "Unexpected code=%d\n",
			     pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			     code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}

	WARN_ON(!pasync_handle);

	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
			     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}

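/**
 * hwi_update_async_writables - consume busy entries up to a CQ index
 * @pasync_ctx: The async PDU context
 * @is_header: Whether to walk the header ring or the data ring
 * @cq_index: Ring index reported by the completion
 *
 * Advances the endpoint read pointer to @cq_index, marking each busy
 * entry passed along the way as consumed and crediting it back to the
 * ring's writable count.
 */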
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}

static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
				       unsigned int cri)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;
	unsigned int i = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	/* first entry on the wait queue is the header, the rest is data */
	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
		list_del(&pasync_handle->link);

		if (i == 0) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_header.free_list);
			pasync_ctx->async_header.free_entries++;
			i++;
		} else {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_data.free_list);
			pasync_ctx->async_data.free_entries++;
			i++;
		}
	}

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
	return 0;
}

static struct phys_addr *
hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
		     unsigned int is_header, unsigned int host_write_ptr)
{
	struct phys_addr *pasync_sge = NULL;

	if (is_header)
		pasync_sge = pasync_ctx->async_header.ring_base;
	else
		pasync_sge = pasync_ctx->async_data.ring_base;

	return pasync_sge + host_write_ptr;
}

static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* post buffers in batches; round down to a multiple of 8 */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
				       link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
							  is_header,
							  host_write_num);

			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
			    host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
			    << DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}

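/**
 * hwi_flush_default_pdu_buffer - drop a default PDU data buffer
 * @phba: The hba that received the PDU
 * @beiscsi_conn: Connection it arrived on
 * @pdpdu_cqe: The default PDU completion entry
 *
 * Called on digest errors: the data buffer is reclaimed and reposted
 * to the ring without being forwarded to libiscsi.
 */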
static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx,
					   pasync_handle->is_header, cq_index);

	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}

static unsigned int
hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
		  struct beiscsi_hba *phba,
		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
{
	struct list_head *plist;
	struct async_pdu_handle *pasync_handle;
	void *phdr = NULL;
	unsigned int hdr_len = 0, buf_len = 0;
	unsigned int status, index = 0, offset = 0;
	void *pfirst_buffer = NULL;
	unsigned int num_buf = 0;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	/* gather the data buffers contiguously behind the first one */
	list_for_each_entry(pasync_handle, plist, link) {
		if (index == 0) {
			phdr = pasync_handle->pbuffer;
			hdr_len = pasync_handle->buffer_len;
		} else {
			buf_len = pasync_handle->buffer_len;
			if (!num_buf) {
				pfirst_buffer = pasync_handle->pbuffer;
				num_buf++;
			}
			memcpy(pfirst_buffer + offset,
			       pasync_handle->pbuffer, buf_len);
			offset += buf_len;
		}
		index++;
	}

	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
					   (beiscsi_conn->beiscsi_conn_cid -
					    phba->fw_config.iscsi_cid_start),
					   phdr, hdr_len, pfirst_buffer,
					   offset);

	if (status == 0)
		hwi_free_async_msg(phba, cri);
	return 0;
}

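/**
 * hwi_gather_async_pdu - accumulate header and data for an async PDU
 * @beiscsi_conn: Connection the PDU belongs to
 * @phba: The hba that received it
 * @pasync_handle: Handle for the buffer just completed
 *
 * Queues the handle on the per-CRI wait queue; once the header has
 * arrived and the received bytes cover the expected data length, the
 * assembled PDU is forwarded via hwi_fwd_async_msg().
 */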
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
			(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
				bytes_needed;

			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}

static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx,
					   pasync_handle->is_header, cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}

static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
				       num_processed, 0, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(mcc_compl->flags))
				/* Interpret compl as an async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *)
				   mcc_compl);
			else
				SE_DEBUG(DBG_LVL_1,
					 "Unsupported Async Event, flags"
					 " = 0x%08x\n", mcc_compl->flags);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
}

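/**
 * beiscsi_process_cq - reap completions from an I/O completion queue
 * @pbe_eq: The EQ object whose CQ should be processed
 *
 * Handles solicited command completions, driver messages, unsolicited
 * PDU notifications and connection-error CQEs.  The doorbell is rung
 * every 32 entries and once more at the end, with rearm set.  Returns
 * the total number of CQEs processed.
 */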
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, cq->id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 " invalidate\n");
			break;
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
					     (struct i_t_dpdu_cqe *) sol);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
				 " received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			break;
		}

		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	if (num_processed > 0) {
		tot_nump += num_processed;
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
	}
	return tot_nump;
}

void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba =
		container_of(work, struct beiscsi_hba, work_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->msix_enabled)
		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
	else
		pbe_eq = &phwi_context->be_eq[0];

	if (phba->todo_mcc_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_mcc_isr(phba);
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(pbe_eq);
	}
}

static int be_iopoll(struct blk_iopoll *iop, int budget)
{
	unsigned int ret;
	struct beiscsi_hba *phba;
	struct be_eq_obj *pbe_eq;

	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
	ret = beiscsi_process_cq(pbe_eq);
	if (ret < budget) {
		phba = pbe_eq->phba;
		blk_iopoll_complete(iop);
		SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
	}
	return ret;
}

static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
			sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
				      sg_len);
			sge_len = sg_len;
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
				      sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
	}
	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      (addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}

static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
	struct iscsi_sge *psgl;
	unsigned long long addr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct beiscsi_conn *beiscsi_conn = io_task->conn;
	struct beiscsi_hba *phba = beiscsi_conn->phba;

	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	if (task->data) {
		if (task->data_count) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
			addr = (u64) pci_map_single(phba->pcidev,
						    task->data,
						    task->data_count, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
			addr = 0;
		}
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
			      ((u32)(addr & 0xFFFFFFFF)));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
			      ((u32)(addr >> 32)));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
			      task->data_count);

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
		addr = 0;
	}

	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);
	if (task->data) {
		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);

		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      ((u32)(addr & 0xFFFFFFFF)));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      ((u32)(addr >> 32)));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
	}
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}

static void
beiscsi_find_mem_req(struct beiscsi_hba *phba) 2165 { 2166 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2167 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2168 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2169 2170 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2171 sizeof(struct sol_cqe)); 2172 num_async_pdu_buf_pages = 2173 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2174 phba->params.defpdu_hdr_sz); 2175 num_async_pdu_buf_sgl_pages = 2176 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2177 sizeof(struct phys_addr)); 2178 num_async_pdu_data_pages = 2179 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2180 phba->params.defpdu_data_sz); 2181 num_async_pdu_data_sgl_pages = 2182 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2183 sizeof(struct phys_addr)); 2184 2185 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2186 2187 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2188 BE_ISCSI_PDU_HEADER_SIZE; 2189 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2190 sizeof(struct hwi_context_memory); 2191 2192 2193 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2194 * (phba->params.wrbs_per_cxn) 2195 * phba->params.cxns_per_ctrl; 2196 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2197 (phba->params.wrbs_per_cxn); 2198 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2199 phba->params.cxns_per_ctrl); 2200 2201 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2202 phba->params.icds_per_ctrl; 2203 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2204 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2205 2206 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] = 2207 num_async_pdu_buf_pages * PAGE_SIZE; 2208 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] = 2209 num_async_pdu_data_pages * PAGE_SIZE; 2210 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] = 2211 num_async_pdu_buf_sgl_pages * PAGE_SIZE; 2212 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] = 2213 num_async_pdu_data_sgl_pages * PAGE_SIZE; 2214 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] = 2215 phba->params.asyncpdus_per_ctrl * 2216 sizeof(struct async_pdu_handle); 2217 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] = 2218 phba->params.asyncpdus_per_ctrl * 2219 sizeof(struct async_pdu_handle); 2220 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] = 2221 sizeof(struct hwi_async_pdu_context) + 2222 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry)); 2223 } 2224 2225 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2226 { 2227 struct be_mem_descriptor *mem_descr; 2228 dma_addr_t bus_add; 2229 struct mem_array *mem_arr, *mem_arr_orig; 2230 unsigned int i, j, alloc_size, curr_alloc_size; 2231 2232 phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2233 if (!phba->phwi_ctrlr) 2234 return -ENOMEM; 2235 2236 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2237 GFP_KERNEL); 2238 if (!phba->init_mem) { 2239 kfree(phba->phwi_ctrlr); 2240 return -ENOMEM; 2241 } 2242 2243 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT, 2244 GFP_KERNEL); 2245 if (!mem_arr_orig) { 2246 kfree(phba->init_mem); 2247 kfree(phba->phwi_ctrlr); 2248 return -ENOMEM; 2249 } 2250 2251 mem_descr = phba->init_mem; 2252 for (i = 0; i < SE_MEM_MAX; i++) { 2253 j = 0; 2254 mem_arr = mem_arr_orig; 2255 alloc_size = phba->mem_req[i]; 2256 memset(mem_arr, 0, sizeof(struct mem_array) * 2257 BEISCSI_MAX_FRAGS_INIT); 2258 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2259 do { 2260 mem_arr->virtual_address = pci_alloc_consistent( 2261 phba->pcidev, 2262 
curr_alloc_size, 2263 &bus_add); 2264 if (!mem_arr->virtual_address) { 2265 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2266 goto free_mem; 2267 if (curr_alloc_size - 2268 rounddown_pow_of_two(curr_alloc_size)) 2269 curr_alloc_size = rounddown_pow_of_two 2270 (curr_alloc_size); 2271 else 2272 curr_alloc_size = curr_alloc_size / 2; 2273 } else { 2274 mem_arr->bus_address.u. 2275 a64.address = (__u64) bus_add; 2276 mem_arr->size = curr_alloc_size; 2277 alloc_size -= curr_alloc_size; 2278 curr_alloc_size = min(be_max_phys_size * 2279 1024, alloc_size); 2280 j++; 2281 mem_arr++; 2282 } 2283 } while (alloc_size); 2284 mem_descr->num_elements = j; 2285 mem_descr->size_in_bytes = phba->mem_req[i]; 2286 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j, 2287 GFP_KERNEL); 2288 if (!mem_descr->mem_array) 2289 goto free_mem; 2290 2291 memcpy(mem_descr->mem_array, mem_arr_orig, 2292 sizeof(struct mem_array) * j); 2293 mem_descr++; 2294 } 2295 kfree(mem_arr_orig); 2296 return 0; 2297 free_mem: 2298 mem_descr->num_elements = j; 2299 while ((i) || (j)) { 2300 for (j = mem_descr->num_elements; j > 0; j--) { 2301 pci_free_consistent(phba->pcidev, 2302 mem_descr->mem_array[j - 1].size, 2303 mem_descr->mem_array[j - 1]. 2304 virtual_address, 2305 (unsigned long)mem_descr-> 2306 mem_array[j - 1]. 2307 bus_address.u.a64.address); 2308 } 2309 if (i) { 2310 i--; 2311 kfree(mem_descr->mem_array); 2312 mem_descr--; 2313 } 2314 } 2315 kfree(mem_arr_orig); 2316 kfree(phba->init_mem); 2317 kfree(phba->phwi_ctrlr); 2318 return -ENOMEM; 2319 } 2320 2321 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2322 { 2323 beiscsi_find_mem_req(phba); 2324 return beiscsi_alloc_mem(phba); 2325 } 2326 2327 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2328 { 2329 struct pdu_data_out *pdata_out; 2330 struct pdu_nop_out *pnop_out; 2331 struct be_mem_descriptor *mem_descr; 2332 2333 mem_descr = phba->init_mem; 2334 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2335 pdata_out = 2336 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2337 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2338 2339 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2340 IIOC_SCSI_DATA); 2341 2342 pnop_out = 2343 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 
2344 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2345 2346 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2347 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2348 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2349 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2350 } 2351 2352 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2353 { 2354 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2355 struct wrb_handle *pwrb_handle; 2356 struct hwi_controller *phwi_ctrlr; 2357 struct hwi_wrb_context *pwrb_context; 2358 struct iscsi_wrb *pwrb; 2359 unsigned int num_cxn_wrbh; 2360 unsigned int num_cxn_wrb, j, idx, index; 2361 2362 mem_descr_wrbh = phba->init_mem; 2363 mem_descr_wrbh += HWI_MEM_WRBH; 2364 2365 mem_descr_wrb = phba->init_mem; 2366 mem_descr_wrb += HWI_MEM_WRB; 2367 2368 idx = 0; 2369 pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address; 2370 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2371 ((sizeof(struct wrb_handle)) * 2372 phba->params.wrbs_per_cxn)); 2373 phwi_ctrlr = phba->phwi_ctrlr; 2374 2375 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2376 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2377 pwrb_context->pwrb_handle_base = 2378 kzalloc(sizeof(struct wrb_handle *) * 2379 phba->params.wrbs_per_cxn, GFP_KERNEL); 2380 pwrb_context->pwrb_handle_basestd = 2381 kzalloc(sizeof(struct wrb_handle *) * 2382 phba->params.wrbs_per_cxn, GFP_KERNEL); 2383 if (num_cxn_wrbh) { 2384 pwrb_context->alloc_index = 0; 2385 pwrb_context->wrb_handles_available = 0; 2386 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2387 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2388 pwrb_context->pwrb_handle_basestd[j] = 2389 pwrb_handle; 2390 pwrb_context->wrb_handles_available++; 2391 pwrb_handle->wrb_index = j; 2392 pwrb_handle++; 2393 } 2394 pwrb_context->free_index = 0; 2395 num_cxn_wrbh--; 2396 } else { 2397 idx++; 2398 pwrb_handle = 2399 mem_descr_wrbh->mem_array[idx].virtual_address; 2400 num_cxn_wrbh = 2401 ((mem_descr_wrbh->mem_array[idx].size) / 2402 ((sizeof(struct wrb_handle)) * 2403 phba->params.wrbs_per_cxn)); 2404 pwrb_context->alloc_index = 0; 2405 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2406 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2407 pwrb_context->pwrb_handle_basestd[j] = 2408 pwrb_handle; 2409 pwrb_context->wrb_handles_available++; 2410 pwrb_handle->wrb_index = j; 2411 pwrb_handle++; 2412 } 2413 pwrb_context->free_index = 0; 2414 num_cxn_wrbh--; 2415 } 2416 } 2417 idx = 0; 2418 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2419 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2420 ((sizeof(struct iscsi_wrb) * 2421 phba->params.wrbs_per_cxn)); 2422 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 2423 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2424 if (num_cxn_wrb) { 2425 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2426 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2427 pwrb_handle->pwrb = pwrb; 2428 pwrb++; 2429 } 2430 num_cxn_wrb--; 2431 } else { 2432 idx++; 2433 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2434 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2435 ((sizeof(struct iscsi_wrb) * 2436 phba->params.wrbs_per_cxn)); 2437 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2438 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2439 pwrb_handle->pwrb = pwrb; 2440 pwrb++; 2441 } 2442 num_cxn_wrb--; 2443 } 2444 } 2445 } 2446 2447 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2448 
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index;
	struct be_mem_descriptor *mem_descr;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
				mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_header.va_base =
			mem_descr->mem_array[0].virtual_address;

	pasync_ctx->async_header.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	pasync_ctx->async_header.ring_base =
			mem_descr->mem_array[0].virtual_address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_header.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	pasync_ctx->async_data.va_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

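	/* The data ring mirrors the header ring above: both are arrays of
	 * struct phys_addr entries handed to the chip when the default
	 * PDU queues are created. */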
	pasync_ctx->async_data.ring_base =
			mem_descr->mem_array[0].virtual_address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_data.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
			      &pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * index));

		pasync_data_h->pa.u.a64.address =
			pasync_ctx->async_data.pa_base.u.a64.address +
			(p->defpdu_data_sz * index);

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}

static int
be_sgl_create_contiguous(void *virtual_address,
			 u64 physical_address, u32 length,
			 struct be_dma_mem *sgl)
{
	WARN_ON(!virtual_address);
	WARN_ON(!physical_address);
	WARN_ON(!length);
	WARN_ON(!sgl);

	sgl->va = virtual_address;
	sgl->dma = (unsigned long)physical_address;
	sgl->size = length;

	return 0;
}

static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
{
	memset(sgl, 0, sizeof(*sgl));
}

static void
hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
		     struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous(pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}

static void
hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
			   struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}

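/*
 * be_fill_queue() only initializes the queue descriptor over memory the
 * caller has already allocated and DMA-mapped; compare be_queue_alloc()
 * further below, which allocates its own backing store.
 */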
static int be_fill_queue(struct be_queue_info *q,
		u16 len, u16 entry_size, void *vaddress)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = vaddress;
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static int beiscsi_create_eqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	unsigned int i, num_eq_pages;
	int ret, eq_for_mcc;
	struct be_queue_info *eq;
	struct be_dma_mem *mem;
	void *eq_vaddress;
	dma_addr_t paddr;

	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
				      sizeof(struct be_eq_entry));

	if (phba->msix_enabled)
		eq_for_mcc = 1;
	else
		eq_for_mcc = 0;
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		phwi_context->be_eq[i].phba = phba;
		eq_vaddress = pci_alloc_consistent(phba->pcidev,
						   num_eq_pages * PAGE_SIZE,
						   &paddr);
		if (!eq_vaddress)
			goto create_eq_error;

		mem->va = eq_vaddress;
		ret = be_fill_queue(eq, phba->params.num_eq_entries,
				    sizeof(struct be_eq_entry), eq_vaddress);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost,
				     "be_fill_queue Failed for EQ\n");
			goto create_eq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
					    phwi_context->cur_eqd);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost,
				     "beiscsi_cmd_eq_create"
				     " Failed for EQ\n");
			goto create_eq_error;
		}
		SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
	}
	return 0;
create_eq_error:
	for (i = 0; i < (phba->num_cpus + 1); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		if (mem->va)
			pci_free_consistent(phba->pcidev, num_eq_pages
					    * PAGE_SIZE,
					    mem->va, mem->dma);
	}
	return ret;
}

static int beiscsi_create_cqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	unsigned int i, num_cq_pages;
	int ret;
	struct be_queue_info *cq, *eq;
	struct be_dma_mem *mem;
	struct be_eq_obj *pbe_eq;
	void *cq_vaddress;
	dma_addr_t paddr;

	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
				      sizeof(struct sol_cqe));

	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		eq = &phwi_context->be_eq[i].q;
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq->cq = cq;
		pbe_eq->phba = phba;
		mem = &cq->dma_mem;
		cq_vaddress = pci_alloc_consistent(phba->pcidev,
						   num_cq_pages * PAGE_SIZE,
						   &paddr);
		if (!cq_vaddress)
			goto create_cq_error;
		ret = be_fill_queue(cq, phba->params.num_cq_entries,
				    sizeof(struct sol_cqe), cq_vaddress);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost,
				     "be_fill_queue Failed for ISCSI CQ\n");
			goto create_cq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
					    false, 0);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost,
				     "beiscsi_cmd_cq_create"
				     " Failed for ISCSI CQ\n");
			goto create_cq_error;
		}
		SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
			 cq->id, eq->id);
		SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
	}
	return 0;

create_cq_error:
	for (i =
0; i < phba->num_cpus; i++) { 2778 cq = &phwi_context->be_cq[i]; 2779 mem = &cq->dma_mem; 2780 if (mem->va) 2781 pci_free_consistent(phba->pcidev, num_cq_pages 2782 * PAGE_SIZE, 2783 mem->va, mem->dma); 2784 } 2785 return ret; 2786 2787 } 2788 2789 static int 2790 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 2791 struct hwi_context_memory *phwi_context, 2792 struct hwi_controller *phwi_ctrlr, 2793 unsigned int def_pdu_ring_sz) 2794 { 2795 unsigned int idx; 2796 int ret; 2797 struct be_queue_info *dq, *cq; 2798 struct be_dma_mem *mem; 2799 struct be_mem_descriptor *mem_descr; 2800 void *dq_vaddress; 2801 2802 idx = 0; 2803 dq = &phwi_context->be_def_hdrq; 2804 cq = &phwi_context->be_cq[0]; 2805 mem = &dq->dma_mem; 2806 mem_descr = phba->init_mem; 2807 mem_descr += HWI_MEM_ASYNC_HEADER_RING; 2808 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 2809 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 2810 sizeof(struct phys_addr), 2811 sizeof(struct phys_addr), dq_vaddress); 2812 if (ret) { 2813 shost_printk(KERN_ERR, phba->shost, 2814 "be_fill_queue Failed for DEF PDU HDR\n"); 2815 return ret; 2816 } 2817 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 2818 bus_address.u.a64.address; 2819 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 2820 def_pdu_ring_sz, 2821 phba->params.defpdu_hdr_sz); 2822 if (ret) { 2823 shost_printk(KERN_ERR, phba->shost, 2824 "be_cmd_create_default_pdu_queue Failed DEFHDR\n"); 2825 return ret; 2826 } 2827 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id; 2828 SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n", 2829 phwi_context->be_def_hdrq.id); 2830 hwi_post_async_buffers(phba, 1); 2831 return 0; 2832 } 2833 2834 static int 2835 beiscsi_create_def_data(struct beiscsi_hba *phba, 2836 struct hwi_context_memory *phwi_context, 2837 struct hwi_controller *phwi_ctrlr, 2838 unsigned int def_pdu_ring_sz) 2839 { 2840 unsigned int idx; 2841 int ret; 2842 struct be_queue_info *dataq, *cq; 2843 struct be_dma_mem *mem; 2844 struct be_mem_descriptor *mem_descr; 2845 void *dq_vaddress; 2846 2847 idx = 0; 2848 dataq = &phwi_context->be_def_dataq; 2849 cq = &phwi_context->be_cq[0]; 2850 mem = &dataq->dma_mem; 2851 mem_descr = phba->init_mem; 2852 mem_descr += HWI_MEM_ASYNC_DATA_RING; 2853 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 2854 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 2855 sizeof(struct phys_addr), 2856 sizeof(struct phys_addr), dq_vaddress); 2857 if (ret) { 2858 shost_printk(KERN_ERR, phba->shost, 2859 "be_fill_queue Failed for DEF PDU DATA\n"); 2860 return ret; 2861 } 2862 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
2863 bus_address.u.a64.address; 2864 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 2865 def_pdu_ring_sz, 2866 phba->params.defpdu_data_sz); 2867 if (ret) { 2868 shost_printk(KERN_ERR, phba->shost, 2869 "be_cmd_create_default_pdu_queue Failed" 2870 " for DEF PDU DATA\n"); 2871 return ret; 2872 } 2873 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; 2874 SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n", 2875 phwi_context->be_def_dataq.id); 2876 hwi_post_async_buffers(phba, 0); 2877 SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n"); 2878 return 0; 2879 } 2880 2881 static int 2882 beiscsi_post_pages(struct beiscsi_hba *phba) 2883 { 2884 struct be_mem_descriptor *mem_descr; 2885 struct mem_array *pm_arr; 2886 unsigned int page_offset, i; 2887 struct be_dma_mem sgl; 2888 int status; 2889 2890 mem_descr = phba->init_mem; 2891 mem_descr += HWI_MEM_SGE; 2892 pm_arr = mem_descr->mem_array; 2893 2894 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 2895 phba->fw_config.iscsi_icd_start) / PAGE_SIZE; 2896 for (i = 0; i < mem_descr->num_elements; i++) { 2897 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 2898 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 2899 page_offset, 2900 (pm_arr->size / PAGE_SIZE)); 2901 page_offset += pm_arr->size / PAGE_SIZE; 2902 if (status != 0) { 2903 shost_printk(KERN_ERR, phba->shost, 2904 "post sgl failed.\n"); 2905 return status; 2906 } 2907 pm_arr++; 2908 } 2909 SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n"); 2910 return 0; 2911 } 2912 2913 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 2914 { 2915 struct be_dma_mem *mem = &q->dma_mem; 2916 if (mem->va) 2917 pci_free_consistent(phba->pcidev, mem->size, 2918 mem->va, mem->dma); 2919 } 2920 2921 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 2922 u16 len, u16 entry_size) 2923 { 2924 struct be_dma_mem *mem = &q->dma_mem; 2925 2926 memset(q, 0, sizeof(*q)); 2927 q->len = len; 2928 q->entry_size = entry_size; 2929 mem->size = len * entry_size; 2930 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma); 2931 if (!mem->va) 2932 return -ENOMEM; 2933 memset(mem->va, 0, mem->size); 2934 return 0; 2935 } 2936 2937 static int 2938 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 2939 struct hwi_context_memory *phwi_context, 2940 struct hwi_controller *phwi_ctrlr) 2941 { 2942 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 2943 u64 pa_addr_lo; 2944 unsigned int idx, num, i; 2945 struct mem_array *pwrb_arr; 2946 void *wrb_vaddr; 2947 struct be_dma_mem sgl; 2948 struct be_mem_descriptor *mem_descr; 2949 int status; 2950 2951 idx = 0; 2952 mem_descr = phba->init_mem; 2953 mem_descr += HWI_MEM_WRB; 2954 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl, 2955 GFP_KERNEL); 2956 if (!pwrb_arr) { 2957 shost_printk(KERN_ERR, phba->shost, 2958 "Memory alloc failed in create wrb ring.\n"); 2959 return -ENOMEM; 2960 } 2961 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 2962 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 2963 num_wrb_rings = mem_descr->mem_array[idx].size / 2964 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 2965 2966 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 2967 if (num_wrb_rings) { 2968 pwrb_arr[num].virtual_address = wrb_vaddr; 2969 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 2970 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 2971 sizeof(struct iscsi_wrb); 2972 wrb_vaddr += pwrb_arr[num].size; 2973 pa_addr_lo 
+= pwrb_arr[num].size; 2974 num_wrb_rings--; 2975 } else { 2976 idx++; 2977 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 2978 pa_addr_lo = mem_descr->mem_array[idx].\ 2979 bus_address.u.a64.address; 2980 num_wrb_rings = mem_descr->mem_array[idx].size / 2981 (phba->params.wrbs_per_cxn * 2982 sizeof(struct iscsi_wrb)); 2983 pwrb_arr[num].virtual_address = wrb_vaddr; 2984 pwrb_arr[num].bus_address.u.a64.address\ 2985 = pa_addr_lo; 2986 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 2987 sizeof(struct iscsi_wrb); 2988 wrb_vaddr += pwrb_arr[num].size; 2989 pa_addr_lo += pwrb_arr[num].size; 2990 num_wrb_rings--; 2991 } 2992 } 2993 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 2994 wrb_mem_index = 0; 2995 offset = 0; 2996 size = 0; 2997 2998 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 2999 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3000 &phwi_context->be_wrbq[i]); 3001 if (status != 0) { 3002 shost_printk(KERN_ERR, phba->shost, 3003 "wrbq create failed."); 3004 kfree(pwrb_arr); 3005 return status; 3006 } 3007 phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. 3008 id; 3009 } 3010 kfree(pwrb_arr); 3011 return 0; 3012 } 3013 3014 static void free_wrb_handles(struct beiscsi_hba *phba) 3015 { 3016 unsigned int index; 3017 struct hwi_controller *phwi_ctrlr; 3018 struct hwi_wrb_context *pwrb_context; 3019 3020 phwi_ctrlr = phba->phwi_ctrlr; 3021 for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { 3022 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3023 kfree(pwrb_context->pwrb_handle_base); 3024 kfree(pwrb_context->pwrb_handle_basestd); 3025 } 3026 } 3027 3028 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3029 { 3030 struct be_queue_info *q; 3031 struct be_ctrl_info *ctrl = &phba->ctrl; 3032 3033 q = &phba->ctrl.mcc_obj.q; 3034 if (q->created) 3035 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3036 be_queue_free(phba, q); 3037 3038 q = &phba->ctrl.mcc_obj.cq; 3039 if (q->created) 3040 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3041 be_queue_free(phba, q); 3042 } 3043 3044 static void hwi_cleanup(struct beiscsi_hba *phba) 3045 { 3046 struct be_queue_info *q; 3047 struct be_ctrl_info *ctrl = &phba->ctrl; 3048 struct hwi_controller *phwi_ctrlr; 3049 struct hwi_context_memory *phwi_context; 3050 int i, eq_num; 3051 3052 phwi_ctrlr = phba->phwi_ctrlr; 3053 phwi_context = phwi_ctrlr->phwi_ctxt; 3054 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3055 q = &phwi_context->be_wrbq[i]; 3056 if (q->created) 3057 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3058 } 3059 free_wrb_handles(phba); 3060 3061 q = &phwi_context->be_def_hdrq; 3062 if (q->created) 3063 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3064 3065 q = &phwi_context->be_def_dataq; 3066 if (q->created) 3067 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3068 3069 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3070 3071 for (i = 0; i < (phba->num_cpus); i++) { 3072 q = &phwi_context->be_cq[i]; 3073 if (q->created) 3074 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3075 } 3076 if (phba->msix_enabled) 3077 eq_num = 1; 3078 else 3079 eq_num = 0; 3080 for (i = 0; i < (phba->num_cpus + eq_num); i++) { 3081 q = &phwi_context->be_eq[i].q; 3082 if (q->created) 3083 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3084 } 3085 be_mcc_queues_destroy(phba); 3086 } 3087 3088 static int be_mcc_queues_create(struct beiscsi_hba *phba, 3089 struct hwi_context_memory *phwi_context) 3090 { 3091 struct be_queue_info *q, *cq; 3092 struct be_ctrl_info *ctrl = &phba->ctrl; 3093 3094 /* Alloc MCC compl queue */ 
3095 cq = &phba->ctrl.mcc_obj.cq; 3096 if (be_queue_alloc(phba, cq, MCC_CQ_LEN, 3097 sizeof(struct be_mcc_compl))) 3098 goto err; 3099 /* Ask BE to create MCC compl queue; */ 3100 if (phba->msix_enabled) { 3101 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq 3102 [phba->num_cpus].q, false, true, 0)) 3103 goto mcc_cq_free; 3104 } else { 3105 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3106 false, true, 0)) 3107 goto mcc_cq_free; 3108 } 3109 3110 /* Alloc MCC queue */ 3111 q = &phba->ctrl.mcc_obj.q; 3112 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) 3113 goto mcc_cq_destroy; 3114 3115 /* Ask BE to create MCC queue */ 3116 if (beiscsi_cmd_mccq_create(phba, q, cq)) 3117 goto mcc_q_free; 3118 3119 return 0; 3120 3121 mcc_q_free: 3122 be_queue_free(phba, q); 3123 mcc_cq_destroy: 3124 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); 3125 mcc_cq_free: 3126 be_queue_free(phba, cq); 3127 err: 3128 return -ENOMEM; 3129 } 3130 3131 static int find_num_cpus(void) 3132 { 3133 int num_cpus = 0; 3134 3135 num_cpus = num_online_cpus(); 3136 if (num_cpus >= MAX_CPUS) 3137 num_cpus = MAX_CPUS - 1; 3138 3139 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus); 3140 return num_cpus; 3141 } 3142 3143 static int hwi_init_port(struct beiscsi_hba *phba) 3144 { 3145 struct hwi_controller *phwi_ctrlr; 3146 struct hwi_context_memory *phwi_context; 3147 unsigned int def_pdu_ring_sz; 3148 struct be_ctrl_info *ctrl = &phba->ctrl; 3149 int status; 3150 3151 def_pdu_ring_sz = 3152 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); 3153 phwi_ctrlr = phba->phwi_ctrlr; 3154 phwi_context = phwi_ctrlr->phwi_ctxt; 3155 phwi_context->max_eqd = 0; 3156 phwi_context->min_eqd = 0; 3157 phwi_context->cur_eqd = 64; 3158 be_cmd_fw_initialize(&phba->ctrl); 3159 3160 status = beiscsi_create_eqs(phba, phwi_context); 3161 if (status != 0) { 3162 shost_printk(KERN_ERR, phba->shost, "EQ not created\n"); 3163 goto error; 3164 } 3165 3166 status = be_mcc_queues_create(phba, phwi_context); 3167 if (status != 0) 3168 goto error; 3169 3170 status = mgmt_check_supported_fw(ctrl, phba); 3171 if (status != 0) { 3172 shost_printk(KERN_ERR, phba->shost, 3173 "Unsupported fw version\n"); 3174 goto error; 3175 } 3176 3177 status = beiscsi_create_cqs(phba, phwi_context); 3178 if (status != 0) { 3179 shost_printk(KERN_ERR, phba->shost, "CQ not created\n"); 3180 goto error; 3181 } 3182 3183 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, 3184 def_pdu_ring_sz); 3185 if (status != 0) { 3186 shost_printk(KERN_ERR, phba->shost, 3187 "Default Header not created\n"); 3188 goto error; 3189 } 3190 3191 status = beiscsi_create_def_data(phba, phwi_context, 3192 phwi_ctrlr, def_pdu_ring_sz); 3193 if (status != 0) { 3194 shost_printk(KERN_ERR, phba->shost, 3195 "Default Data not created\n"); 3196 goto error; 3197 } 3198 3199 status = beiscsi_post_pages(phba); 3200 if (status != 0) { 3201 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n"); 3202 goto error; 3203 } 3204 3205 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3206 if (status != 0) { 3207 shost_printk(KERN_ERR, phba->shost, 3208 "WRB Rings not created\n"); 3209 goto error; 3210 } 3211 3212 SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n"); 3213 return 0; 3214 3215 error: 3216 shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed"); 3217 hwi_cleanup(phba); 3218 return -ENOMEM; 3219 } 3220 3221 static int hwi_init_controller(struct beiscsi_hba *phba) 3222 { 3223 struct hwi_controller *phwi_ctrlr; 3224 3225 
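	/*
	 * The additional HWI context must sit in one physically contiguous
	 * allocation; if beiscsi_alloc_mem() had to fragment it we cannot
	 * use it, so the check below fails the load.
	 */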
phwi_ctrlr = phba->phwi_ctrlr; 3226 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3227 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3228 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3229 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n", 3230 phwi_ctrlr->phwi_ctxt); 3231 } else { 3232 shost_printk(KERN_ERR, phba->shost, 3233 "HWI_MEM_ADDN_CONTEXT is more than one element." 3234 "Failing to load\n"); 3235 return -ENOMEM; 3236 } 3237 3238 iscsi_init_global_templates(phba); 3239 beiscsi_init_wrb_handle(phba); 3240 hwi_init_async_pdu_ctx(phba); 3241 if (hwi_init_port(phba) != 0) { 3242 shost_printk(KERN_ERR, phba->shost, 3243 "hwi_init_controller failed\n"); 3244 return -ENOMEM; 3245 } 3246 return 0; 3247 } 3248 3249 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3250 { 3251 struct be_mem_descriptor *mem_descr; 3252 int i, j; 3253 3254 mem_descr = phba->init_mem; 3255 i = 0; 3256 j = 0; 3257 for (i = 0; i < SE_MEM_MAX; i++) { 3258 for (j = mem_descr->num_elements; j > 0; j--) { 3259 pci_free_consistent(phba->pcidev, 3260 mem_descr->mem_array[j - 1].size, 3261 mem_descr->mem_array[j - 1].virtual_address, 3262 (unsigned long)mem_descr->mem_array[j - 1]. 3263 bus_address.u.a64.address); 3264 } 3265 kfree(mem_descr->mem_array); 3266 mem_descr++; 3267 } 3268 kfree(phba->init_mem); 3269 kfree(phba->phwi_ctrlr); 3270 } 3271 3272 static int beiscsi_init_controller(struct beiscsi_hba *phba) 3273 { 3274 int ret = -ENOMEM; 3275 3276 ret = beiscsi_get_memory(phba); 3277 if (ret < 0) { 3278 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -" 3279 "Failed in beiscsi_alloc_memory\n"); 3280 return ret; 3281 } 3282 3283 ret = hwi_init_controller(phba); 3284 if (ret) 3285 goto free_init; 3286 SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller"); 3287 return 0; 3288 3289 free_init: 3290 beiscsi_free_mem(phba); 3291 return -ENOMEM; 3292 } 3293 3294 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3295 { 3296 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3297 struct sgl_handle *psgl_handle; 3298 struct iscsi_sge *pfrag; 3299 unsigned int arr_index, i, idx; 3300 3301 phba->io_sgl_hndl_avbl = 0; 3302 phba->eh_sgl_hndl_avbl = 0; 3303 3304 mem_descr_sglh = phba->init_mem; 3305 mem_descr_sglh += HWI_MEM_SGLH; 3306 if (1 == mem_descr_sglh->num_elements) { 3307 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3308 phba->params.ios_per_ctrl, 3309 GFP_KERNEL); 3310 if (!phba->io_sgl_hndl_base) { 3311 shost_printk(KERN_ERR, phba->shost, 3312 "Mem Alloc Failed. Failing to load\n"); 3313 return -ENOMEM; 3314 } 3315 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3316 (phba->params.icds_per_ctrl - 3317 phba->params.ios_per_ctrl), 3318 GFP_KERNEL); 3319 if (!phba->eh_sgl_hndl_base) { 3320 kfree(phba->io_sgl_hndl_base); 3321 shost_printk(KERN_ERR, phba->shost, 3322 "Mem Alloc Failed. Failing to load\n"); 3323 return -ENOMEM; 3324 } 3325 } else { 3326 shost_printk(KERN_ERR, phba->shost, 3327 "HWI_MEM_SGLH is more than one element." 
3328 "Failing to load\n"); 3329 return -ENOMEM; 3330 } 3331 3332 arr_index = 0; 3333 idx = 0; 3334 while (idx < mem_descr_sglh->num_elements) { 3335 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3336 3337 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3338 sizeof(struct sgl_handle)); i++) { 3339 if (arr_index < phba->params.ios_per_ctrl) { 3340 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3341 phba->io_sgl_hndl_avbl++; 3342 arr_index++; 3343 } else { 3344 phba->eh_sgl_hndl_base[arr_index - 3345 phba->params.ios_per_ctrl] = 3346 psgl_handle; 3347 arr_index++; 3348 phba->eh_sgl_hndl_avbl++; 3349 } 3350 psgl_handle++; 3351 } 3352 idx++; 3353 } 3354 SE_DEBUG(DBG_LVL_8, 3355 "phba->io_sgl_hndl_avbl=%d" 3356 "phba->eh_sgl_hndl_avbl=%d\n", 3357 phba->io_sgl_hndl_avbl, 3358 phba->eh_sgl_hndl_avbl); 3359 mem_descr_sg = phba->init_mem; 3360 mem_descr_sg += HWI_MEM_SGE; 3361 SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n", 3362 mem_descr_sg->num_elements); 3363 arr_index = 0; 3364 idx = 0; 3365 while (idx < mem_descr_sg->num_elements) { 3366 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 3367 3368 for (i = 0; 3369 i < (mem_descr_sg->mem_array[idx].size) / 3370 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 3371 i++) { 3372 if (arr_index < phba->params.ios_per_ctrl) 3373 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 3374 else 3375 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 3376 phba->params.ios_per_ctrl]; 3377 psgl_handle->pfrag = pfrag; 3378 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 3379 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3380 pfrag += phba->params.num_sge_per_io; 3381 psgl_handle->sgl_index = 3382 phba->fw_config.iscsi_icd_start + arr_index++; 3383 } 3384 idx++; 3385 } 3386 phba->io_sgl_free_index = 0; 3387 phba->io_sgl_alloc_index = 0; 3388 phba->eh_sgl_free_index = 0; 3389 phba->eh_sgl_alloc_index = 0; 3390 return 0; 3391 } 3392 3393 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 3394 { 3395 int i, new_cid; 3396 3397 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, 3398 GFP_KERNEL); 3399 if (!phba->cid_array) { 3400 shost_printk(KERN_ERR, phba->shost, 3401 "Failed to allocate memory in " 3402 "hba_setup_cid_tbls\n"); 3403 return -ENOMEM; 3404 } 3405 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 3406 phba->params.cxns_per_ctrl * 2, GFP_KERNEL); 3407 if (!phba->ep_array) { 3408 shost_printk(KERN_ERR, phba->shost, 3409 "Failed to allocate memory in " 3410 "hba_setup_cid_tbls\n"); 3411 kfree(phba->cid_array); 3412 return -ENOMEM; 3413 } 3414 new_cid = phba->fw_config.iscsi_cid_start; 3415 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3416 phba->cid_array[i] = new_cid; 3417 new_cid += 2; 3418 } 3419 phba->avlbl_cids = phba->params.cxns_per_ctrl; 3420 return 0; 3421 } 3422 3423 static void hwi_enable_intr(struct beiscsi_hba *phba) 3424 { 3425 struct be_ctrl_info *ctrl = &phba->ctrl; 3426 struct hwi_controller *phwi_ctrlr; 3427 struct hwi_context_memory *phwi_context; 3428 struct be_queue_info *eq; 3429 u8 __iomem *addr; 3430 u32 reg, i; 3431 u32 enabled; 3432 3433 phwi_ctrlr = phba->phwi_ctrlr; 3434 phwi_context = phwi_ctrlr->phwi_ctxt; 3435 3436 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 3437 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 3438 reg = ioread32(addr); 3439 3440 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3441 if (!enabled) { 3442 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3443 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr); 
		iowrite32(reg, addr);
	}

	if (!phba->msix_enabled) {
		eq = &phwi_context->be_eq[0].q;
		SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	} else {
		for (i = 0; i <= phba->num_cpus; i++) {
			eq = &phwi_context->be_eq[i].q;
			SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
		}
	}
}

static void hwi_disable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;

	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);

	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (enabled) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		iowrite32(reg, addr);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "In hwi_disable_intr, Already Disabled\n");
}

static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
{
	struct be_cmd_resp_get_boot_target *boot_resp;
	struct be_cmd_resp_get_session *session_resp;
	struct be_mcc_wrb *wrb;
	struct be_dma_mem nonemb_cmd;
	unsigned int tag, wrb_num;
	unsigned short status, extd_status;
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	int ret = -ENOMEM;

	tag = beiscsi_get_boot_target(phba);
	if (!tag) {
		SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_target Failed\n");
		return -EAGAIN;
	} else
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);

	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
	if (status || extd_status) {
		SE_DEBUG(DBG_LVL_1, "beiscsi_get_boot_target Failed"
			 " status = %d extd_status = %d\n",
			 status, extd_status);
		free_mcc_tag(&phba->ctrl, tag);
		return -EBUSY;
	}
	wrb = queue_get_wrb(mccq, wrb_num);
	free_mcc_tag(&phba->ctrl, tag);
	boot_resp = embedded_payload(wrb);

	if (boot_resp->boot_session_handle < 0) {
		shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
		return -ENXIO;
	}

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(*session_resp),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		SE_DEBUG(DBG_LVL_1,
			 "Failed to allocate memory for"
			 " beiscsi_get_session_info\n");
		return -ENOMEM;
	}
	nonemb_cmd.size = sizeof(*session_resp);

	memset(nonemb_cmd.va, 0, sizeof(*session_resp));
	tag = beiscsi_get_session_info(phba,
				boot_resp->boot_session_handle, &nonemb_cmd);
	if (!tag) {
		SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
			 " Failed\n");
		goto boot_freemem;
	} else
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);

	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
	if (status || extd_status) {
		SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
			 " status = %d extd_status = %d\n",
			 status, extd_status);
		free_mcc_tag(&phba->ctrl, tag);
		goto boot_freemem;
	}
	wrb = queue_get_wrb(mccq, wrb_num);
	free_mcc_tag(&phba->ctrl, tag);
	session_resp = nonemb_cmd.va;

	memcpy(&phba->boot_sess, &session_resp->session_info,
	       sizeof(struct mgmt_session_info));
	ret = 0;

boot_freemem:
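	/* Success and failure paths both release the session-info buffer. */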
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return ret;
}

static void beiscsi_boot_release(void *data)
{
	struct beiscsi_hba *phba = data;

	scsi_host_put(phba->shost);
}

static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
{
	struct iscsi_boot_kobj *boot_kobj;

	/* get boot info using mgmt cmd */
	if (beiscsi_get_boot_info(phba))
		/* Try to see if we can carry on without this */
		return 0;

	phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
	if (!phba->boot_kset)
		return -ENOMEM;

	/* get a ref because the show function will ref the phba */
	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
					     beiscsi_show_boot_tgt_info,
					     beiscsi_tgt_get_attr_visibility,
					     beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
					     beiscsi_show_boot_ini_info,
					     beiscsi_ini_get_attr_visibility,
					     beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
					     beiscsi_show_boot_eth_info,
					     beiscsi_eth_get_attr_visibility,
					     beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;
	return 0;

put_shost:
	scsi_host_put(phba->shost);
free_kset:
	iscsi_boot_destroy_kset(phba->boot_kset);
	return -ENOMEM;
}

static int beiscsi_init_port(struct beiscsi_hba *phba)
{
	int ret;

	ret = beiscsi_init_controller(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_dev_probe - Failed in"
			     " beiscsi_init_controller\n");
		return ret;
	}
	ret = beiscsi_init_sgl_handle(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_dev_probe - Failed in"
			     " beiscsi_init_sgl_handle\n");
		goto do_cleanup_ctrlr;
	}

	if (hba_setup_cid_tbls(phba)) {
		shost_printk(KERN_ERR, phba->shost,
			     "Failed in hba_setup_cid_tbls\n");
		kfree(phba->io_sgl_hndl_base);
		kfree(phba->eh_sgl_hndl_base);
		goto do_cleanup_ctrlr;
	}

	return ret;

do_cleanup_ctrlr:
	hwi_cleanup(phba);
	return ret;
}

static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
	}
}

static void beiscsi_clean_port(struct beiscsi_hba *phba)
{
	int
static void beiscsi_clean_port(struct beiscsi_hba *phba)
{
	int mgmt_status;

	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
	if (mgmt_status)
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_epfw_cleanup FAILED\n");

	hwi_purge_eq(phba);
	hwi_cleanup(phba);
	kfree(phba->io_sgl_hndl_base);
	kfree(phba->eh_sgl_hndl_base);
	kfree(phba->cid_array);
	kfree(phba->ep_array);
}

void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
				       phba->fw_config.iscsi_cid_start));
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

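	/* Program the DMA address of the preallocated global header pad
	 * buffer into the connection context, split into hi/lo 32 bits.
	 */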
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
		     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}

static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
			      int *index, int *age)
{
	*index = (int)itt;
	if (age)
		*age = conn->session->age;
}

/**
 * beiscsi_alloc_pdu - allocates pdu and related resources
 * @task: libiscsi task
 * @opcode: opcode of pdu for task
 *
 * This is called with the session lock held. It will allocate
 * the wrb and sgl if needed for the command, and it will prep
 * the pdu's itt. beiscsi_parse_pdu will later translate
 * the pdu itt to the libiscsi task itt.
 */
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	itt_t itt;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	dma_addr_t paddr;

	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_ATOMIC, &paddr);
	if (!io_task->cmd_bhs)
		return -ENOMEM;
	io_task->bhs_pa.u.a64.address = paddr;
	io_task->libiscsi_itt = (itt_t)task->itt;
	io_task->conn = beiscsi_conn;

	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
	task->hdr_max = sizeof(struct be_cmd_bhs);
	io_task->psgl_handle = NULL;
	io_task->pwrb_handle = NULL;

	if (task->sc) {
		spin_lock(&phba->io_sgl_lock);
		io_task->psgl_handle = alloc_io_sgl_handle(phba);
		spin_unlock(&phba->io_sgl_lock);
		if (!io_task->psgl_handle)
			goto free_hndls;
		io_task->pwrb_handle = alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid -
					phba->fw_config.iscsi_cid_start);
		if (!io_task->pwrb_handle)
			goto free_io_hndls;
	} else {
		io_task->scsi_cmnd = NULL;
		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
			if (!beiscsi_conn->login_in_progress) {
				spin_lock(&phba->mgmt_sgl_lock);
				io_task->psgl_handle =
						alloc_mgmt_sgl_handle(phba);
				spin_unlock(&phba->mgmt_sgl_lock);
				if (!io_task->psgl_handle)
					goto free_hndls;

				beiscsi_conn->login_in_progress = 1;
				beiscsi_conn->plogin_sgl_handle =
							io_task->psgl_handle;
				io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid -
					phba->fw_config.iscsi_cid_start);
				if (!io_task->pwrb_handle)
					goto free_mgmt_hndls;
				beiscsi_conn->plogin_wrb_handle =
							io_task->pwrb_handle;

			} else {
				io_task->psgl_handle =
						beiscsi_conn->plogin_sgl_handle;
				io_task->pwrb_handle =
						beiscsi_conn->plogin_wrb_handle;
			}
		} else {
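			/*
			 * Non-login mgmt PDUs (NOP-Out, TMF, text, logout)
			 * each take a fresh handle from the mgmt SGL pool;
			 * only login reuses the handles cached on the
			 * connection above.
			 */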
			spin_lock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
			spin_unlock(&phba->mgmt_sgl_lock);
			if (!io_task->psgl_handle)
				goto free_hndls;
			io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid -
					phba->fw_config.iscsi_cid_start);
			if (!io_task->pwrb_handle)
				goto free_mgmt_hndls;

		}
	}
	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
				 wrb_index << 16) | (unsigned int)
				(io_task->psgl_handle->sgl_index));
	io_task->pwrb_handle->pio_handle = task;

	io_task->cmd_bhs->iscsi_hdr.itt = itt;
	return 0;

free_io_hndls:
	spin_lock(&phba->io_sgl_lock);
	free_io_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->io_sgl_lock);
	goto free_hndls;
free_mgmt_hndls:
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
free_hndls:
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[
			beiscsi_conn->beiscsi_conn_cid -
			phba->fw_config.iscsi_cid_start];
	if (io_task->pwrb_handle)
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
	io_task->pwrb_handle = NULL;
	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
		      io_task->bhs_pa.u.a64.address);
	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
	return -ENOMEM;
}

static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
			- phba->fw_config.iscsi_cid_start];
	if (io_task->pwrb_handle) {
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
		io_task->pwrb_handle = NULL;
	}

	if (io_task->cmd_bhs) {
		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
	}

	if (task->sc) {
		if (io_task->psgl_handle) {
			spin_lock(&phba->io_sgl_lock);
			free_io_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->io_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	} else {
		if (task->hdr &&
		    ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
			return;
		if (io_task->psgl_handle) {
			spin_lock(&phba->mgmt_sgl_lock);
			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	}
}

static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
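		/*
		 * Pre-format the 48-byte Data-Out header template (ITT,
		 * opcode, F bit) in the BHS so the adapter can emit the
		 * Data-Out PDUs for this write.
		 */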
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
				  &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}

static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);
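	/*
	 * Pick the WRB type the hardware expects for each PDU: login,
	 * text and a NOP-Out answering a target NOP-In (valid TTT) go
	 * out as TGT_DM_CMD; an initiator ping with the reserved TTT is
	 * posted as INI_RD_CMD since a NOP-In reply is expected; TMFs
	 * use INI_TMF_CMD and logout uses HWH_TYPE_LOGOUT.
	 */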
	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      TGT_DM_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
				      pwrb, 0);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_RD_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		}
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;

	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      task->data_count);
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}

static int beiscsi_task_xmit(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct scatterlist *sg;
	int num_sg;
	unsigned int writedir = 0, xferlen = 0;

	if (!sc)
		return beiscsi_mtask(task);

	io_task->scsi_cmnd = sc;
	num_sg = scsi_dma_map(sc);
	if (num_sg < 0) {
		SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n");
		return num_sg;
	}
	xferlen = scsi_bufflen(sc);
	sg = scsi_sglist(sc);
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		writedir = 1;
		SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
			 task->imm_count);
	} else
		writedir = 0;
	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
}

static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;
	u8 *real_offset = 0;
	u32 value = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
			kfree(phba->msi_name[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	value = readl((void *)real_offset);

	if (value & 0x00010000) {
		value &= 0xfffeffff;
		writel(value, (void *)real_offset);
	}
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
}
"beiscsi_remove called with no phba\n"); 4183 return; 4184 } 4185 4186 beiscsi_quiesce(phba); 4187 iscsi_boot_destroy_kset(phba->boot_kset); 4188 iscsi_host_remove(phba->shost); 4189 pci_dev_put(phba->pcidev); 4190 iscsi_host_free(phba->shost); 4191 pci_disable_device(pcidev); 4192 } 4193 4194 static void beiscsi_shutdown(struct pci_dev *pcidev) 4195 { 4196 4197 struct beiscsi_hba *phba = NULL; 4198 4199 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); 4200 if (!phba) { 4201 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n"); 4202 return; 4203 } 4204 4205 beiscsi_quiesce(phba); 4206 pci_disable_device(pcidev); 4207 } 4208 4209 static void beiscsi_msix_enable(struct beiscsi_hba *phba) 4210 { 4211 int i, status; 4212 4213 for (i = 0; i <= phba->num_cpus; i++) 4214 phba->msix_entries[i].entry = i; 4215 4216 status = pci_enable_msix(phba->pcidev, phba->msix_entries, 4217 (phba->num_cpus + 1)); 4218 if (!status) 4219 phba->msix_enabled = true; 4220 4221 return; 4222 } 4223 4224 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, 4225 const struct pci_device_id *id) 4226 { 4227 struct beiscsi_hba *phba = NULL; 4228 struct hwi_controller *phwi_ctrlr; 4229 struct hwi_context_memory *phwi_context; 4230 struct be_eq_obj *pbe_eq; 4231 int ret, num_cpus, i; 4232 u8 *real_offset = 0; 4233 u32 value = 0; 4234 4235 ret = beiscsi_enable_pci(pcidev); 4236 if (ret < 0) { 4237 dev_err(&pcidev->dev, "beiscsi_dev_probe-" 4238 " Failed to enable pci device\n"); 4239 return ret; 4240 } 4241 4242 phba = beiscsi_hba_alloc(pcidev); 4243 if (!phba) { 4244 dev_err(&pcidev->dev, "beiscsi_dev_probe-" 4245 " Failed in beiscsi_hba_alloc\n"); 4246 goto disable_pci; 4247 } 4248 4249 switch (pcidev->device) { 4250 case BE_DEVICE_ID1: 4251 case OC_DEVICE_ID1: 4252 case OC_DEVICE_ID2: 4253 phba->generation = BE_GEN2; 4254 break; 4255 case BE_DEVICE_ID2: 4256 case OC_DEVICE_ID3: 4257 phba->generation = BE_GEN3; 4258 break; 4259 default: 4260 phba->generation = 0; 4261 } 4262 4263 if (enable_msix) 4264 num_cpus = find_num_cpus(); 4265 else 4266 num_cpus = 1; 4267 phba->num_cpus = num_cpus; 4268 SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus); 4269 4270 if (enable_msix) 4271 beiscsi_msix_enable(phba); 4272 ret = be_ctrl_init(phba, pcidev); 4273 if (ret) { 4274 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4275 "Failed in be_ctrl_init\n"); 4276 goto hba_free; 4277 } 4278 4279 if (!num_hba) { 4280 real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE; 4281 value = readl((void *)real_offset); 4282 if (value & 0x00010000) { 4283 gcrashmode++; 4284 shost_printk(KERN_ERR, phba->shost, 4285 "Loading Driver in crashdump mode\n"); 4286 ret = beiscsi_cmd_reset_function(phba); 4287 if (ret) { 4288 shost_printk(KERN_ERR, phba->shost, 4289 "Reset Failed. Aborting Crashdump\n"); 4290 goto hba_free; 4291 } 4292 ret = be_chk_reset_complete(phba); 4293 if (ret) { 4294 shost_printk(KERN_ERR, phba->shost, 4295 "Failed to get out of reset." 
4296 "Aborting Crashdump\n"); 4297 goto hba_free; 4298 } 4299 } else { 4300 value |= 0x00010000; 4301 writel(value, (void *)real_offset); 4302 num_hba++; 4303 } 4304 } 4305 4306 spin_lock_init(&phba->io_sgl_lock); 4307 spin_lock_init(&phba->mgmt_sgl_lock); 4308 spin_lock_init(&phba->isr_lock); 4309 ret = mgmt_get_fw_config(&phba->ctrl, phba); 4310 if (ret != 0) { 4311 shost_printk(KERN_ERR, phba->shost, 4312 "Error getting fw config\n"); 4313 goto free_port; 4314 } 4315 phba->shost->max_id = phba->fw_config.iscsi_cid_count; 4316 beiscsi_get_params(phba); 4317 phba->shost->can_queue = phba->params.ios_per_ctrl; 4318 ret = beiscsi_init_port(phba); 4319 if (ret < 0) { 4320 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4321 "Failed in beiscsi_init_port\n"); 4322 goto free_port; 4323 } 4324 4325 for (i = 0; i < MAX_MCC_CMD ; i++) { 4326 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 4327 phba->ctrl.mcc_tag[i] = i + 1; 4328 phba->ctrl.mcc_numtag[i + 1] = 0; 4329 phba->ctrl.mcc_tag_available++; 4330 } 4331 4332 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 4333 4334 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u", 4335 phba->shost->host_no); 4336 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1); 4337 if (!phba->wq) { 4338 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4339 "Failed to allocate work queue\n"); 4340 goto free_twq; 4341 } 4342 4343 INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs); 4344 4345 phwi_ctrlr = phba->phwi_ctrlr; 4346 phwi_context = phwi_ctrlr->phwi_ctxt; 4347 if (blk_iopoll_enabled) { 4348 for (i = 0; i < phba->num_cpus; i++) { 4349 pbe_eq = &phwi_context->be_eq[i]; 4350 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, 4351 be_iopoll); 4352 blk_iopoll_enable(&pbe_eq->iopoll); 4353 } 4354 } 4355 ret = beiscsi_init_irqs(phba); 4356 if (ret < 0) { 4357 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-" 4358 "Failed to beiscsi_init_irqs\n"); 4359 goto free_blkenbld; 4360 } 4361 hwi_enable_intr(phba); 4362 4363 if (beiscsi_setup_boot_info(phba)) 4364 /* 4365 * log error but continue, because we may not be using 4366 * iscsi boot. 
	if (beiscsi_setup_boot_info(phba))
		/*
		 * log error but continue, because we may not be using
		 * iscsi boot.
		 */
		shost_printk(KERN_ERR, phba->shost, "Could not set up "
			     "iSCSI boot info.\n");

	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_blkenbld:
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}
free_twq:
	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
free_port:
	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	value = readl((void *)real_offset);

	if (value & 0x00010000) {
		value &= 0xfffeffff;
		writel(value, (void *)real_offset);
	}

	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
hba_free:
	if (phba->msix_enabled)
		pci_disable_msix(phba->pcidev);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
disable_pci:
	pci_disable_device(pcidev);
	return ret;
}

struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = be2iscsi_attr_is_visible,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.shutdown = beiscsi_shutdown,
	.id_table = beiscsi_pci_id_table
};

static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
			iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		SE_DEBUG(DBG_LVL_1,
			 "beiscsi_module_init - Unable to register beiscsi "
			 "transport.\n");
		return -ENOMEM;
	}
	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
		 &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		SE_DEBUG(DBG_LVL_1,
			 "beiscsi_module_init - Unable to register "
			 "beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
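	/*
	 * Unregistering the PCI driver first guarantees every HBA has
	 * been removed before the transport itself goes away.
	 */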
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);