/*
 * Copyright 2017 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 *
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
#include <linux/irq_poll.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. "
Range is 16 - 128"); 57 58 #define beiscsi_disp_param(_name)\ 59 static ssize_t \ 60 beiscsi_##_name##_disp(struct device *dev,\ 61 struct device_attribute *attrib, char *buf) \ 62 { \ 63 struct Scsi_Host *shost = class_to_shost(dev);\ 64 struct beiscsi_hba *phba = iscsi_host_priv(shost); \ 65 return snprintf(buf, PAGE_SIZE, "%d\n",\ 66 phba->attr_##_name);\ 67 } 68 69 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ 70 static int \ 71 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ 72 {\ 73 if (val >= _minval && val <= _maxval) {\ 74 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 75 "BA_%d : beiscsi_"#_name" updated "\ 76 "from 0x%x ==> 0x%x\n",\ 77 phba->attr_##_name, val); \ 78 phba->attr_##_name = val;\ 79 return 0;\ 80 } \ 81 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ 82 "BA_%d beiscsi_"#_name" attribute "\ 83 "cannot be updated to 0x%x, "\ 84 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 85 return -EINVAL;\ 86 } 87 88 #define beiscsi_store_param(_name) \ 89 static ssize_t \ 90 beiscsi_##_name##_store(struct device *dev,\ 91 struct device_attribute *attr, const char *buf,\ 92 size_t count) \ 93 { \ 94 struct Scsi_Host *shost = class_to_shost(dev);\ 95 struct beiscsi_hba *phba = iscsi_host_priv(shost);\ 96 uint32_t param_val = 0;\ 97 if (!isdigit(buf[0]))\ 98 return -EINVAL;\ 99 if (sscanf(buf, "%i", ¶m_val) != 1)\ 100 return -EINVAL;\ 101 if (beiscsi_##_name##_change(phba, param_val) == 0) \ 102 return strlen(buf);\ 103 else \ 104 return -EINVAL;\ 105 } 106 107 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ 108 static int \ 109 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ 110 { \ 111 if (val >= _minval && val <= _maxval) {\ 112 phba->attr_##_name = val;\ 113 return 0;\ 114 } \ 115 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 116 "BA_%d beiscsi_"#_name" attribute " \ 117 "cannot be updated to 0x%x, "\ 118 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 119 phba->attr_##_name = _defval;\ 120 return -EINVAL;\ 121 } 122 123 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ 124 static uint beiscsi_##_name = _defval;\ 125 module_param(beiscsi_##_name, uint, S_IRUGO);\ 126 MODULE_PARM_DESC(beiscsi_##_name, _descp);\ 127 beiscsi_disp_param(_name)\ 128 beiscsi_change_param(_name, _minval, _maxval, _defval)\ 129 beiscsi_store_param(_name)\ 130 beiscsi_init_param(_name, _minval, _maxval, _defval)\ 131 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ 132 beiscsi_##_name##_disp, beiscsi_##_name##_store) 133 134 /* 135 * When new log level added update the 136 * the MAX allowed value for log_enable 137 */ 138 BEISCSI_RW_ATTR(log_enable, 0x00, 139 0xFF, 0x00, "Enable logging Bit Mask\n" 140 "\t\t\t\tInitialization Events : 0x01\n" 141 "\t\t\t\tMailbox Events : 0x02\n" 142 "\t\t\t\tMiscellaneous Events : 0x04\n" 143 "\t\t\t\tError Handling : 0x08\n" 144 "\t\t\t\tIO Path Events : 0x10\n" 145 "\t\t\t\tConfiguration Path : 0x20\n" 146 "\t\t\t\tiSCSI Protocol : 0x40\n"); 147 148 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 149 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 150 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); 151 DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); 152 DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, 153 beiscsi_active_session_disp, NULL); 154 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, 155 beiscsi_free_session_disp, NULL); 156 struct device_attribute *beiscsi_attrs[] 
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_session_count,
	&dev_attr_beiscsi_free_session_count,
	&dev_attr_beiscsi_phys_port,
	NULL,
};

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_io_task *abrt_io_task;
	struct beiscsi_conn *beiscsi_conn;
	struct iscsi_session *session;
	struct invldt_cmd_tbl inv_tbl;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	int rc;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* check if we raced, task just got cleaned up under us */
	spin_lock_bh(&session->back_lock);
	if (!abrt_task || !abrt_task->sc) {
		spin_unlock_bh(&session->back_lock);
		return SUCCESS;
	}
	/* get a task ref till FW processes the req for the ICD used */
	__iscsi_get_task(abrt_task);
	abrt_io_task = abrt_task->dd_data;
	conn = abrt_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	/* mark as invalid any WRB not yet processed by FW */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	}
	inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
	spin_unlock_bh(&session->back_lock);

	rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
	iscsi_put_task(abrt_task);
	if (rc) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : sc %p invalidation failed %d\n",
			    sc, rc);
		return FAILED;
	}

	return iscsi_eh_abort(sc);
}
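/**
 * beiscsi_eh_device_reset - issue LUN reset after invalidating commands
 * @sc: SCSI command from the LUN being reset
 *
 * Collects every in-flight task for the LUN, invalidates the ICDs used
 * by them in FW, then lets libiscsi send the TMF via
 * iscsi_eh_device_reset().
 */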
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct beiscsi_invldt_cmd_tbl {
		struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
		struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
	} *inv_tbl;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_session *session;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	struct iscsi_task *task;
	unsigned int i, nents;
	int rc, more = 0;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
	if (!inv_tbl) {
		spin_unlock_bh(&session->frwd_lock);
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : invldt_cmd_tbl alloc failed\n");
		return FAILED;
	}
	nents = 0;
	/* take back_lock to prevent task from getting cleaned up under us */
	spin_lock(&session->back_lock);
	for (i = 0; i < conn->session->cmds_max; i++) {
		task = conn->session->cmds[i];
		if (!task->sc)
			continue;

		if (sc->device->lun != task->sc->device->lun)
			continue;
		/*
		 * Can't fit in more cmds? Normally this won't happen because
		 * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ.
		 */
		if (nents == BE_INVLDT_CMD_TBL_SZ) {
			more = 1;
			break;
		}

		/* get a task ref till FW processes the req for the ICD used */
		__iscsi_get_task(task);
		io_task = task->dd_data;
		/* mark as invalid any WRB not yet processed by FW */
		if (is_chip_be2_be3r(phba)) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
				      io_task->pwrb_handle->pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
				      io_task->pwrb_handle->pwrb, 1);
		}

		inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
		inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
		inv_tbl->task[nents] = task;
		nents++;
	}
	spin_unlock(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);

	rc = SUCCESS;
	if (!nents)
		goto end_reset;

	if (more) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : number of cmds exceeds size of invalidation table\n");
		rc = FAILED;
		goto end_reset;
	}

	if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : cid %u scmds invalidation failed\n",
			    beiscsi_conn->beiscsi_conn_cid);
		rc = FAILED;
	}

end_reset:
	for (i = 0; i < nents; i++)
		iscsi_put_task(inv_tbl->task[i]);
	kfree(inv_tbl);

	if (rc == SUCCESS)
		rc = iscsi_eh_device_reset(sc);
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

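/* SCSI midlayer host template; error handling hooks route through libiscsi */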
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_timed_out = iscsi_eh_cmd_timed_out,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);
	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
	if (ret) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		} else {
			ret = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(32));
		}
	} else {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}
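/**
 * be_ctrl_init - map BARs and set up the mailbox used for FW commands
 * @phba: ptr device priv structure
 * @pdev: PCI device being initialized
 *
 * Allocates DMA-coherent mailbox memory and keeps a 16-byte aligned
 * view of it in ctrl->mbox_mem.
 */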
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);

	return status;
}

/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned ICD per page posting */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						   ~(align_mask));
				phba->fw_config.iscsi_icd_start[ulp_num] =
						icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICD discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
					icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Aligned ICD values\n"
				    "\t ICD Start : %d\n"
				    "\t ICD Count : %d\n"
				    "\t ICD Discarded : %d\n",
				    phba->fw_config.iscsi_icd_start[ulp_num],
				    phba->fw_config.iscsi_icd_count[ulp_num],
				    icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				    (total_cid_count +
				     BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}
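/* ring the EQ doorbell: ack num_processed entries and optionally rearm */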
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		  DB_EQ_RING_ID_HIGH_MASK)
		  << DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
				       resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			mcc_events++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}

	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
	}
	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_queue_info *eq;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;

	phba = pbe_eq->phba;
	/* disable interrupt till iopoll completes */
	hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
	irq_poll_sched(&pbe_eq->iopoll);

	return IRQ_HANDLED;
}
/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events, io_events;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr, rearm;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	io_events = 0;
	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		      resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
			mcc_events++;
		else
			io_events++;
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (!io_events && !mcc_events)
		return IRQ_NONE;

	/* no need to rearm if interrupt is only for IOs */
	rearm = 0;
	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		/* rearm for MCCQ */
		rearm = 1;
	}
	if (io_events)
		irq_poll_sched(&pbe_eq->iopoll);
	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
	return IRQ_HANDLED;
}

static void beiscsi_free_irqs(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	int i;

	if (!phba->pcidev->msix_enabled) {
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
		return;
	}

	phwi_context = phba->phwi_ctrlr->phwi_ctxt;
	for (i = 0; i <= phba->num_cpus; i++) {
		free_irq(pci_irq_vector(phba->pcidev, i),
			 &phwi_context->be_eq[i]);
		kfree(phba->msi_name[i]);
	}
}
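/**
 * beiscsi_init_irqs - register interrupt handlers
 * @phba: ptr device priv structure
 *
 * With MSI-X, one vector per CPU is wired to be_isr_msix and a final
 * vector to be_isr_mcc; otherwise a single shared INTx line uses be_isr.
 */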
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (pcidev->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kasprintf(GFP_KERNEL,
						      "beiscsi_%02x_%02x",
						      phba->shost->host_no, i);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			ret = request_irq(pci_irq_vector(pcidev, i),
					  be_isr_msix, 0, phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
					      phba->shost->host_no);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
				  phba->msi_name[i], &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		/* free the vector matching each registered EQ, not vector i */
		free_irq(pci_irq_vector(pcidev, j), &phwi_context->be_eq[j]);
		kfree(phba->msi_name[j]);
	}
	return ret;
}

void hwi_ring_cq_db(struct beiscsi_hba *phba,
		    unsigned int id, unsigned int num_processed,
		    unsigned char rearm)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		  DB_CQ_RING_ID_HIGH_MASK)
		  << DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
	return psgl_handle;
}
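/* return an IO SGL handle to the ring; a non-NULL slot indicates a double free */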
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
			    phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}

static inline struct wrb_handle *
beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       unsigned int wrbs_per_cxn)
{
	struct wrb_handle *pwrb_handle;
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	if (!pwrb_context->wrb_handles_available) {
		spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
		return NULL;
	}
	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
	pwrb_context->wrb_handles_available--;
	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
		pwrb_context->alloc_index = 0;
	else
		pwrb_context->alloc_index++;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);

	if (pwrb_handle)
		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));

	return pwrb_handle;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pcontext: ptr to ptr to wrb context
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* return the context address */
	*pcontext = pwrb_context;
	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
}

static inline void
beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       struct wrb_handle *pwrb_handle,
		       unsigned int wrbs_per_cxn)
{
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;
	pwrb_handle->pio_handle = NULL;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	beiscsi_put_wrb_handle(pwrb_context,
			       pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}
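/* eh/mgmt SGL handles form a separate pool of icds_per_ctrl - ios_per_ctrl entries */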
static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle, "
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, "
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}
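/**
 * be_complete_io - complete a SCSI command from a solicited CQE
 * @beiscsi_conn: driver connection the command was issued on
 * @task: libiscsi task being completed
 * @csol_cqe: chip-independent copy of the solicited CQE fields
 *
 * Translates iSCSI response/status/flags into the SCSI result, copies
 * sense data on CHECK CONDITION, accounts residuals, unmaps DMA and
 * hands the task back to libiscsi.
 */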
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task,
	       struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task,
		   struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	uint16_t wrb_index, cid, cri_index;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_session *session;
	struct iscsi_task *task;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	session = beiscsi_conn->conn->session;
	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (task)
		__iscsi_put_task(task);
	spin_unlock_bh(&session->back_lock);
}
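/* build a NOP-In response header from the solicited CQE and complete the PDU */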
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task,
		       struct common_sol_cqe *csol_cqe)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = csol_cqe->i_flags;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
				struct sol_cqe *psol,
				struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}
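/**
 * hwi_complete_cmd - dispatch a solicited CQE to the right handler
 * @beiscsi_conn: driver connection the CQE belongs to
 * @phba: ptr device priv structure
 * @psol: solicited CQE from the chip
 *
 * Looks up the task through the WRB handle recorded in the CQE and,
 * under back_lock, completes it as IO, logout, TMF or NOP-In based on
 * the WRB type.
 */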
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_task *task;
	uint16_t cri_index = 0;
	uint8_t type;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[
		      csol_cqe.wrb_index];

	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (!task) {
		spin_unlock_bh(&session->back_lock);
		return;
	}
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : No HWH_TYPE_LOGIN expected in"
			    " hwi_complete_cmd - solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}

/**
 * ASYNC PDUs include
 * a. Unsolicited NOP-In (target initiated NOP-In)
 * b. ASYNC Messages
 * c. Reject PDU
 * d. Login response
 * These headers arrive unprocessed by the EP firmware.
 * iSCSI layer processes them.
 */
static unsigned int
beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct pdu_base *phdr, void *pdata, unsigned int dlen)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;
	struct iscsi_task *task;
	u8 code;

	code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
	switch (code) {
	case ISCSI_OP_NOOP_IN:
		pdata = NULL;
		dlen = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pdata);
		WARN_ON(dlen != 48);
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)phdr;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : unrecognized async PDU opcode 0x%x\n",
			    code);
		return 1;
	}
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
	return 0;
}

static inline void
beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	pasync_handle->is_final = 0;
	pasync_handle->buffer_len = 0;
	pasync_handle->in_use = 0;
	list_del_init(&pasync_handle->link);
}

static void
beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
			  struct hd_async_context *pasync_ctx,
			  u16 cri)
{
	struct hd_async_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link)
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
}
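/**
 * beiscsi_hdl_get_handle - map a DEF PDU CQE to its async buffer handle
 * @beiscsi_conn: driver connection that received the PDU
 * @pasync_ctx: async PDU context for the ULP
 * @pdpdu_cqe: DEF PDU CQE to decode
 * @header: set to 1 when the CQE refers to a header ring entry
 *
 * Returns the validated handle, or NULL when the CQE carries an error
 * or an unexpected completion code.
 */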
static struct hd_async_handle *
beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct i_t_dpdu_cqe *pdpdu_cqe,
		       u8 *header)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle;
	struct be_bus_address phys_addr;
	u16 cid, code, ci, cri;
	u8 final, error = 0;
	u32 dpl;

	cid = beiscsi_conn->beiscsi_conn_cid;
	cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
	/**
	 * This function is invoked to get the right async_handle structure
	 * from a given DEF PDU CQ entry.
	 *
	 * - index in CQ entry gives the vertical index
	 * - address in CQ entry is the offset where the DMA last ended
	 * - final - no more notifications for this PDU
	 */
	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      final, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      final, pdpdu_cqe);
	}

	/**
	 * DB addr Hi/Lo is same for BE and SKH.
	 * Subtract the dataplacementlength to get to the base.
	 */
	phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_lo, pdpdu_cqe);
	phys_addr.u.a32.address_lo -= dpl;
	phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_hi, pdpdu_cqe);

	code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
	switch (code) {
	case UNSOL_HDR_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].header;
		*header = 1;
		break;
	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
		error = 1;
		/* fall through */
	case UNSOL_DATA_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].data;
		break;
	/* called only for above codes */
	default:
		return NULL;
	}

	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
	    pasync_handle->index != ci) {
		/* driver bug - if ci does not match async handle index */
		error = 1;
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
			    cid, pasync_handle->is_header ? 'H' : 'D',
			    pasync_handle->pa.u.a64.address,
			    pasync_handle->index,
			    phys_addr.u.a64.address, ci);
		/* FW has stale address - attempt continuing by dropping */
	}

	/**
	 * DEF PDU header and data buffers with errors should be simply
	 * dropped as there are no consumers for it.
	 */
	if (error) {
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
		return NULL;
	}

	if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
			    cid, code, ci, phys_addr.u.a64.address);
		beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	}

	list_del_init(&pasync_handle->link);
	/**
	 * Each CID is associated with unique CRI.
	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally different.
	 **/
	pasync_handle->cri = cri;
	pasync_handle->is_final = final;
	pasync_handle->buffer_len = dpl;
	pasync_handle->in_use = 1;

	return pasync_handle;
}
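/**
 * beiscsi_hdl_fwd_pdu - reassemble and forward a gathered async PDU
 * @beiscsi_conn: driver connection the PDU arrived on
 * @pasync_ctx: async PDU context holding the per-CRI wait queue
 * @cri: connection resource index whose queued handles form the PDU
 *
 * The first queued handle carries the header; data from the remaining
 * handles is copied into the first data buffer, then the whole PDU is
 * handed to beiscsi_complete_pdu() under back_lock.
 */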
static unsigned int
beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
		    struct hd_async_context *pasync_ctx,
		    u16 cri)
{
	struct iscsi_session *session = beiscsi_conn->conn->session;
	struct hd_async_handle *pasync_handle, *plast_handle;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	void *phdr = NULL, *pdata = NULL;
	u32 dlen = 0, status = 0;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	plast_handle = NULL;
	list_for_each_entry(pasync_handle, plist, link) {
		plast_handle = pasync_handle;
		/* get the header, the first entry */
		if (!phdr) {
			phdr = pasync_handle->pbuffer;
			continue;
		}
		/* use first buffer to collect all the data */
		if (!pdata) {
			pdata = pasync_handle->pbuffer;
			dlen = pasync_handle->buffer_len;
			continue;
		}
		if (!pasync_handle->buffer_len ||
		    (dlen + pasync_handle->buffer_len) >
		    pasync_ctx->async_data.buffer_size)
			break;
		memcpy(pdata + dlen, pasync_handle->pbuffer,
		       pasync_handle->buffer_len);
		dlen += pasync_handle->buffer_len;
	}

	if (!plast_handle->is_final) {
		/* last handle should have final PDU notification from FW */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n",
			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
			    AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr),
			    pasync_ctx->async_entry[cri].wq.hdr_len,
			    pasync_ctx->async_entry[cri].wq.bytes_needed,
			    pasync_ctx->async_entry[cri].wq.bytes_received);
	}
	spin_lock_bh(&session->back_lock);
	status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
	spin_unlock_bh(&session->back_lock);
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	return status;
}

static unsigned int
beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	unsigned int bytes_needed = 0, status = 0;
	u16 cri = pasync_handle->cri;
	struct cri_wait_queue *wq;
	struct beiscsi_hba *phba;
	struct pdu_base *ppdu;
	char *err = "";

	phba = beiscsi_conn->phba;
	wq = &pasync_ctx->async_entry[cri].wq;
	if (pasync_handle->is_header) {
		/* check if PDU hdr is rcv'd when old hdr not completed */
		if (wq->hdr_len) {
			err = "incomplete";
			goto drop_pdu;
		}
		ppdu = pasync_handle->pbuffer;
		bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
					     data_len_hi, ppdu);
		bytes_needed <<= 16;
		bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
							  data_len_lo, ppdu));
		wq->hdr_len = pasync_handle->buffer_len;
		wq->bytes_received = 0;
		wq->bytes_needed = bytes_needed;
		list_add_tail(&pasync_handle->link, &wq->list);
		if (!bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	} else {
		/* check if data received has header and is needed */
		if (!wq->hdr_len || !wq->bytes_needed) {
			err = "header less";
			goto drop_pdu;
		}
		wq->bytes_received += pasync_handle->buffer_len;
		/* Something got overwritten? Better catch it here. */
		if (wq->bytes_received > wq->bytes_needed) {
			err = "overflow";
			goto drop_pdu;
		}
		list_add_tail(&pasync_handle->link, &wq->list);
		if (wq->bytes_received == wq->bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	}
	return status;

drop_pdu:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
		    "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
		    beiscsi_conn->beiscsi_conn_cid, err,
		    pasync_handle->is_header ? 'H' : 'D',
		    wq->hdr_len, wq->bytes_needed,
		    pasync_handle->buffer_len);
	/* discard this handle */
	beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
	/* free all the other handles in cri_wait_queue */
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	/* try continuing */
	return status;
}
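/**
 * beiscsi_hdq_post_handles - replenish the DEF PDU header or data ring
 * @phba: ptr device priv structure
 * @header: nonzero to post to the header ring, else the data ring
 * @ulp_num: ULP whose ring is being replenished
 * @nbuf: number of buffers to post
 *
 * Advances the producer index and rings the doorbell; the SGEs are
 * written only on the initial full-ring post.
 */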
static void
beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
			 u8 header, u8 ulp_num, u16 nbuf)
{
	struct hd_async_handle *pasync_handle;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	struct phys_addr *pasync_sge;
	u32 ring_id, doorbell = 0;
	u32 doorbell_offset;
	u16 prod, pi;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	if (header) {
		pasync_sge = pasync_ctx->async_header.ring_base;
		pi = pasync_ctx->async_header.pi;
		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
				  doorbell_offset;
	} else {
		pasync_sge = pasync_ctx->async_data.ring_base;
		pi = pasync_ctx->async_data.pi;
		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
				  doorbell_offset;
	}

	for (prod = 0; prod < nbuf; prod++) {
		if (header)
			pasync_handle = pasync_ctx->async_entry[pi].header;
		else
			pasync_handle = pasync_ctx->async_entry[pi].data;
		WARN_ON(pasync_handle->is_header != header);
		WARN_ON(pasync_handle->index != pi);
		/* setup the ring only once */
		if (nbuf == pasync_ctx->num_entries) {
			/* note hi is lo */
			pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
		}
		if (++pi == pasync_ctx->num_entries)
			pi = 0;
	}

	if (header)
		pasync_ctx->async_header.pi = pi;
	else
		pasync_ctx->async_data.pi = pi;

	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
	doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
	doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
	iowrite32(doorbell, phba->db_va + doorbell_offset);
}
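/**
 * beiscsi_hdq_process_compl - handle a DEF PDU ring completion
 * @beiscsi_conn: driver connection that owns the CQE
 * @pdpdu_cqe: DEF PDU CQE to process
 *
 * Gathers the async handle for the CQE and reposts the buffers the
 * firmware reports as consumed (num_cons is in units of 8 RQEs).
 */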
static void
beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
			  struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle = NULL;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	u8 ulp_num, consumed, header = 0;
	u16 cid_cri;

	phwi_ctrlr = phba->phwi_ctrlr;
	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
					       pdpdu_cqe, &header);
	if (is_chip_be2_be3r(phba))
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
					 num_cons, pdpdu_cqe);
	else
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
					 num_cons, pdpdu_cqe);
	if (pasync_handle)
		beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
	/* num_cons indicates number of 8 RQEs consumed */
	if (consumed)
		beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed);
}

void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return;

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
				       num_processed, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			beiscsi_process_async_event(phba, mcc_compl);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}
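/* workqueue handler: drain the MCC CQ, then rearm its EQ */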
(!beiscsi_hba_in_error(phba))
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}

/**
 * beiscsi_process_cq()- Process the Completion Queue
 * @pbe_eq: Event Q on which the Completion has come
 * @budget: Max number of events to be processed
 *
 * return
 *     Number of Completion Entries processed.
 **/
unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int total = 0;
	unsigned int num_processed = 0;
	unsigned short code = 0, cid = 0;
	uint16_t cri_index = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return 0;

		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
			CQE_CODE_MASK);

		/* Get the CID */
		if (is_chip_be2_be3r(phba)) {
			cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
		} else {
			if ((code == DRIVERMSG_NOTIFY) ||
			    (code == UNSOL_HDR_NOTIFY) ||
			    (code == UNSOL_DATA_NOTIFY))
				cid = AMAP_GET_BITS(
						struct amap_i_t_dpdu_cqe_v2,
						cid, sol);
			else
				cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    cid, sol);
		}

		cri_index = BE_GET_CRI_FROM_CID(cid);
		ep = phba->ep_array[cri_index];

		if (ep == NULL) {
			/* connection has already been freed
			 * just move on to next one
			 */
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT,
				    "BM_%d : proc cqe of disconn ep: cid %d\n",
				    cid);
			goto proc_next_cqe;
		}

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* replenish cq */
		if (num_processed == 32) {
			hwi_ring_cq_db(phba, cq->id, 32, 0);
			num_processed = 0;
		}
		total++;

		switch (code) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case UNSOL_DATA_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Ignoring %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
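		/*
		 * Everything below is an adapter-side termination or
		 * protocol error. The digest/kill notifications that
		 * follow are logged only; the CXN_KILLED_* group at
		 * the end also calls iscsi_conn_failure() so libiscsi
		 * can tear down and recover the connection.
		 */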
		case CXN_KILLED_HDR_DIGEST_ERR:
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
				    cqe_desc[code], code, cid);
			spin_lock_bh(&phba->async_pdu_lock);
			/* driver consumes the entry and drops the contents */
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Event %s[%d] received on CID : %d\n",
				    cqe_desc[code], code, cid);
			if (beiscsi_conn)
				iscsi_conn_failure(beiscsi_conn->conn,
						   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Invalid CQE Event Received Code : %d "
				    "CID 0x%x...\n",
				    code, cid);
			break;
		}

proc_next_cqe:
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
		if (total == budget)
			break;
	}

	hwi_ring_cq_db(phba, cq->id, num_processed, 1);
	return total;
}

static int be_iopoll(struct irq_poll *iop, int budget)
{
	unsigned int ret, io_events;
	struct beiscsi_hba *phba;
	struct be_eq_obj *pbe_eq;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;

	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
	phba = pbe_eq->phba;
	if (beiscsi_hba_in_error(phba)) {
		irq_poll_complete(iop);
		return 0;
	}

	io_events = 0;
	eq = &pbe_eq->q;
	eqe = queue_tail_node(eq);
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
	       EQE_VALID_MASK) {
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		io_events++;
	}
	hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);

	ret = beiscsi_process_cq(pbe_eq, budget);
	pbe_eq->cq_count += ret;
	if (ret < budget) {
		irq_poll_complete(iop);
		beiscsi_log(phba, KERN_INFO,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
			    pbe_eq->q.id, ret);
		if
(!beiscsi_hba_in_error(phba)) 2075 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2076 } 2077 return ret; 2078 } 2079 2080 static void 2081 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2082 unsigned int num_sg, struct beiscsi_io_task *io_task) 2083 { 2084 struct iscsi_sge *psgl; 2085 unsigned int sg_len, index; 2086 unsigned int sge_len = 0; 2087 unsigned long long addr; 2088 struct scatterlist *l_sg; 2089 unsigned int offset; 2090 2091 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2092 io_task->bhs_pa.u.a32.address_lo); 2093 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2094 io_task->bhs_pa.u.a32.address_hi); 2095 2096 l_sg = sg; 2097 for (index = 0; (index < num_sg) && (index < 2); index++, 2098 sg = sg_next(sg)) { 2099 if (index == 0) { 2100 sg_len = sg_dma_len(sg); 2101 addr = (u64) sg_dma_address(sg); 2102 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2103 sge0_addr_lo, pwrb, 2104 lower_32_bits(addr)); 2105 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2106 sge0_addr_hi, pwrb, 2107 upper_32_bits(addr)); 2108 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2109 sge0_len, pwrb, 2110 sg_len); 2111 sge_len = sg_len; 2112 } else { 2113 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2114 pwrb, sge_len); 2115 sg_len = sg_dma_len(sg); 2116 addr = (u64) sg_dma_address(sg); 2117 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2118 sge1_addr_lo, pwrb, 2119 lower_32_bits(addr)); 2120 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2121 sge1_addr_hi, pwrb, 2122 upper_32_bits(addr)); 2123 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2124 sge1_len, pwrb, 2125 sg_len); 2126 } 2127 } 2128 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2129 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2130 2131 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2132 2133 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2134 io_task->bhs_pa.u.a32.address_hi); 2135 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2136 io_task->bhs_pa.u.a32.address_lo); 2137 2138 if (num_sg == 1) { 2139 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2140 1); 2141 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2142 0); 2143 } else if (num_sg == 2) { 2144 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2145 0); 2146 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2147 1); 2148 } else { 2149 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2150 0); 2151 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2152 0); 2153 } 2154 2155 sg = l_sg; 2156 psgl++; 2157 psgl++; 2158 offset = 0; 2159 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2160 sg_len = sg_dma_len(sg); 2161 addr = (u64) sg_dma_address(sg); 2162 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2163 lower_32_bits(addr)); 2164 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2165 upper_32_bits(addr)); 2166 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2167 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2168 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2169 offset += sg_len; 2170 } 2171 psgl--; 2172 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2173 } 2174 2175 static void 2176 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2177 unsigned int num_sg, struct beiscsi_io_task *io_task) 2178 { 2179 struct iscsi_sge *psgl; 2180 unsigned int sg_len, index; 2181 unsigned int sge_len = 0; 2182 unsigned long long addr; 2183 struct scatterlist *l_sg; 2184 unsigned int offset; 2185 2186 
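	/*
	 * BE2/BE3 WRB data layout (a sketch; field names are those of
	 * amap_iscsi_wrb used below):
	 *
	 *   WRB: [iscsi_bhs_addr][sge0 addr/len][sge1 addr/len + r2t_offset]
	 *   SGL: [0] BHS frag, [1] reserved (zeroed by the memset below),
	 *        [2..] data frags, with last_sge set on the final entry
	 *
	 * Only the first two data fragments ride inline in the WRB; the
	 * complete fragment list always lives in the task's SGL.
	 */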
AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2187 io_task->bhs_pa.u.a32.address_lo); 2188 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2189 io_task->bhs_pa.u.a32.address_hi); 2190 2191 l_sg = sg; 2192 for (index = 0; (index < num_sg) && (index < 2); index++, 2193 sg = sg_next(sg)) { 2194 if (index == 0) { 2195 sg_len = sg_dma_len(sg); 2196 addr = (u64) sg_dma_address(sg); 2197 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2198 ((u32)(addr & 0xFFFFFFFF))); 2199 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2200 ((u32)(addr >> 32))); 2201 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2202 sg_len); 2203 sge_len = sg_len; 2204 } else { 2205 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2206 pwrb, sge_len); 2207 sg_len = sg_dma_len(sg); 2208 addr = (u64) sg_dma_address(sg); 2209 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2210 ((u32)(addr & 0xFFFFFFFF))); 2211 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2212 ((u32)(addr >> 32))); 2213 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2214 sg_len); 2215 } 2216 } 2217 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2218 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2219 2220 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2221 2222 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2223 io_task->bhs_pa.u.a32.address_hi); 2224 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2225 io_task->bhs_pa.u.a32.address_lo); 2226 2227 if (num_sg == 1) { 2228 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2229 1); 2230 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2231 0); 2232 } else if (num_sg == 2) { 2233 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2234 0); 2235 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2236 1); 2237 } else { 2238 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2239 0); 2240 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2241 0); 2242 } 2243 sg = l_sg; 2244 psgl++; 2245 psgl++; 2246 offset = 0; 2247 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2248 sg_len = sg_dma_len(sg); 2249 addr = (u64) sg_dma_address(sg); 2250 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2251 (addr & 0xFFFFFFFF)); 2252 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2253 (addr >> 32)); 2254 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2255 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2256 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2257 offset += sg_len; 2258 } 2259 psgl--; 2260 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2261 } 2262 2263 /** 2264 * hwi_write_buffer()- Populate the WRB with task info 2265 * @pwrb: ptr to the WRB entry 2266 * @task: iscsi task which is to be executed 2267 **/ 2268 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2269 { 2270 struct iscsi_sge *psgl; 2271 struct beiscsi_io_task *io_task = task->dd_data; 2272 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2273 struct beiscsi_hba *phba = beiscsi_conn->phba; 2274 uint8_t dsp_value = 0; 2275 2276 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2277 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2278 io_task->bhs_pa.u.a32.address_lo); 2279 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2280 io_task->bhs_pa.u.a32.address_hi); 2281 2282 if (task->data) { 2283 2284 /* Check for the data_count */ 2285 dsp_value = (task->data_count) ? 
1 : 0; 2286 2287 if (is_chip_be2_be3r(phba)) 2288 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2289 pwrb, dsp_value); 2290 else 2291 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2292 pwrb, dsp_value); 2293 2294 /* Map addr only if there is data_count */ 2295 if (dsp_value) { 2296 io_task->mtask_addr = pci_map_single(phba->pcidev, 2297 task->data, 2298 task->data_count, 2299 PCI_DMA_TODEVICE); 2300 if (pci_dma_mapping_error(phba->pcidev, 2301 io_task->mtask_addr)) 2302 return -ENOMEM; 2303 io_task->mtask_data_count = task->data_count; 2304 } else 2305 io_task->mtask_addr = 0; 2306 2307 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2308 lower_32_bits(io_task->mtask_addr)); 2309 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2310 upper_32_bits(io_task->mtask_addr)); 2311 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2312 task->data_count); 2313 2314 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2315 } else { 2316 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2317 io_task->mtask_addr = 0; 2318 } 2319 2320 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2321 2322 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2323 2324 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2325 io_task->bhs_pa.u.a32.address_hi); 2326 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2327 io_task->bhs_pa.u.a32.address_lo); 2328 if (task->data) { 2329 psgl++; 2330 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2331 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2332 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2333 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2334 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2335 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2336 2337 psgl++; 2338 if (task->data) { 2339 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2340 lower_32_bits(io_task->mtask_addr)); 2341 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2342 upper_32_bits(io_task->mtask_addr)); 2343 } 2344 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2345 } 2346 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2347 return 0; 2348 } 2349 2350 /** 2351 * beiscsi_find_mem_req()- Find mem needed 2352 * @phba: ptr to HBA struct 2353 **/ 2354 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2355 { 2356 uint8_t mem_descr_index, ulp_num; 2357 unsigned int num_async_pdu_buf_pages; 2358 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2359 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2360 2361 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2362 2363 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2364 BE_ISCSI_PDU_HEADER_SIZE; 2365 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2366 sizeof(struct hwi_context_memory); 2367 2368 2369 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2370 * (phba->params.wrbs_per_cxn) 2371 * phba->params.cxns_per_ctrl; 2372 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2373 (phba->params.wrbs_per_cxn); 2374 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2375 phba->params.cxns_per_ctrl); 2376 2377 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2378 phba->params.icds_per_ctrl; 2379 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2380 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2381 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2382 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2383 2384 num_async_pdu_buf_sgl_pages = 2385 
PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2386 phba, ulp_num) * 2387 sizeof(struct phys_addr)); 2388 2389 num_async_pdu_buf_pages = 2390 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2391 phba, ulp_num) * 2392 phba->params.defpdu_hdr_sz); 2393 2394 num_async_pdu_data_pages = 2395 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2396 phba, ulp_num) * 2397 phba->params.defpdu_data_sz); 2398 2399 num_async_pdu_data_sgl_pages = 2400 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2401 phba, ulp_num) * 2402 sizeof(struct phys_addr)); 2403 2404 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2405 (ulp_num * MEM_DESCR_OFFSET)); 2406 phba->mem_req[mem_descr_index] = 2407 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2408 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2409 2410 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2411 (ulp_num * MEM_DESCR_OFFSET)); 2412 phba->mem_req[mem_descr_index] = 2413 num_async_pdu_buf_pages * 2414 PAGE_SIZE; 2415 2416 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2417 (ulp_num * MEM_DESCR_OFFSET)); 2418 phba->mem_req[mem_descr_index] = 2419 num_async_pdu_data_pages * 2420 PAGE_SIZE; 2421 2422 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2423 (ulp_num * MEM_DESCR_OFFSET)); 2424 phba->mem_req[mem_descr_index] = 2425 num_async_pdu_buf_sgl_pages * 2426 PAGE_SIZE; 2427 2428 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2429 (ulp_num * MEM_DESCR_OFFSET)); 2430 phba->mem_req[mem_descr_index] = 2431 num_async_pdu_data_sgl_pages * 2432 PAGE_SIZE; 2433 2434 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2435 (ulp_num * MEM_DESCR_OFFSET)); 2436 phba->mem_req[mem_descr_index] = 2437 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2438 sizeof(struct hd_async_handle); 2439 2440 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2441 (ulp_num * MEM_DESCR_OFFSET)); 2442 phba->mem_req[mem_descr_index] = 2443 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2444 sizeof(struct hd_async_handle); 2445 2446 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2447 (ulp_num * MEM_DESCR_OFFSET)); 2448 phba->mem_req[mem_descr_index] = 2449 sizeof(struct hd_async_context) + 2450 (BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2451 sizeof(struct hd_async_entry)); 2452 } 2453 } 2454 } 2455 2456 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2457 { 2458 dma_addr_t bus_add; 2459 struct hwi_controller *phwi_ctrlr; 2460 struct be_mem_descriptor *mem_descr; 2461 struct mem_array *mem_arr, *mem_arr_orig; 2462 unsigned int i, j, alloc_size, curr_alloc_size; 2463 2464 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2465 if (!phba->phwi_ctrlr) 2466 return -ENOMEM; 2467 2468 /* Allocate memory for wrb_context */ 2469 phwi_ctrlr = phba->phwi_ctrlr; 2470 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * 2471 phba->params.cxns_per_ctrl, 2472 GFP_KERNEL); 2473 if (!phwi_ctrlr->wrb_context) { 2474 kfree(phba->phwi_ctrlr); 2475 return -ENOMEM; 2476 } 2477 2478 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2479 GFP_KERNEL); 2480 if (!phba->init_mem) { 2481 kfree(phwi_ctrlr->wrb_context); 2482 kfree(phba->phwi_ctrlr); 2483 return -ENOMEM; 2484 } 2485 2486 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT, 2487 GFP_KERNEL); 2488 if (!mem_arr_orig) { 2489 kfree(phba->init_mem); 2490 kfree(phwi_ctrlr->wrb_context); 2491 kfree(phba->phwi_ctrlr); 2492 return -ENOMEM; 2493 } 2494 2495 mem_descr = phba->init_mem; 2496 for (i = 0; i < SE_MEM_MAX; i++) { 2497 if (!phba->mem_req[i]) { 2498 mem_descr->mem_array = NULL; 2499 mem_descr++; 2500 continue; 2501 } 2502 2503 j = 0; 2504 mem_arr = 
mem_arr_orig; 2505 alloc_size = phba->mem_req[i]; 2506 memset(mem_arr, 0, sizeof(struct mem_array) * 2507 BEISCSI_MAX_FRAGS_INIT); 2508 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2509 do { 2510 mem_arr->virtual_address = pci_alloc_consistent( 2511 phba->pcidev, 2512 curr_alloc_size, 2513 &bus_add); 2514 if (!mem_arr->virtual_address) { 2515 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2516 goto free_mem; 2517 if (curr_alloc_size - 2518 rounddown_pow_of_two(curr_alloc_size)) 2519 curr_alloc_size = rounddown_pow_of_two 2520 (curr_alloc_size); 2521 else 2522 curr_alloc_size = curr_alloc_size / 2; 2523 } else { 2524 mem_arr->bus_address.u. 2525 a64.address = (__u64) bus_add; 2526 mem_arr->size = curr_alloc_size; 2527 alloc_size -= curr_alloc_size; 2528 curr_alloc_size = min(be_max_phys_size * 2529 1024, alloc_size); 2530 j++; 2531 mem_arr++; 2532 } 2533 } while (alloc_size); 2534 mem_descr->num_elements = j; 2535 mem_descr->size_in_bytes = phba->mem_req[i]; 2536 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j, 2537 GFP_KERNEL); 2538 if (!mem_descr->mem_array) 2539 goto free_mem; 2540 2541 memcpy(mem_descr->mem_array, mem_arr_orig, 2542 sizeof(struct mem_array) * j); 2543 mem_descr++; 2544 } 2545 kfree(mem_arr_orig); 2546 return 0; 2547 free_mem: 2548 mem_descr->num_elements = j; 2549 while ((i) || (j)) { 2550 for (j = mem_descr->num_elements; j > 0; j--) { 2551 pci_free_consistent(phba->pcidev, 2552 mem_descr->mem_array[j - 1].size, 2553 mem_descr->mem_array[j - 1]. 2554 virtual_address, 2555 (unsigned long)mem_descr-> 2556 mem_array[j - 1]. 2557 bus_address.u.a64.address); 2558 } 2559 if (i) { 2560 i--; 2561 kfree(mem_descr->mem_array); 2562 mem_descr--; 2563 } 2564 } 2565 kfree(mem_arr_orig); 2566 kfree(phba->init_mem); 2567 kfree(phba->phwi_ctrlr->wrb_context); 2568 kfree(phba->phwi_ctrlr); 2569 return -ENOMEM; 2570 } 2571 2572 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2573 { 2574 beiscsi_find_mem_req(phba); 2575 return beiscsi_alloc_mem(phba); 2576 } 2577 2578 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2579 { 2580 struct pdu_data_out *pdata_out; 2581 struct pdu_nop_out *pnop_out; 2582 struct be_mem_descriptor *mem_descr; 2583 2584 mem_descr = phba->init_mem; 2585 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2586 pdata_out = 2587 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2588 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2589 2590 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2591 IIOC_SCSI_DATA); 2592 2593 pnop_out = 2594 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 
2595 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2596 2597 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2598 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2599 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2600 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2601 } 2602 2603 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2604 { 2605 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2606 struct hwi_context_memory *phwi_ctxt; 2607 struct wrb_handle *pwrb_handle = NULL; 2608 struct hwi_controller *phwi_ctrlr; 2609 struct hwi_wrb_context *pwrb_context; 2610 struct iscsi_wrb *pwrb = NULL; 2611 unsigned int num_cxn_wrbh = 0; 2612 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2613 2614 mem_descr_wrbh = phba->init_mem; 2615 mem_descr_wrbh += HWI_MEM_WRBH; 2616 2617 mem_descr_wrb = phba->init_mem; 2618 mem_descr_wrb += HWI_MEM_WRB; 2619 phwi_ctrlr = phba->phwi_ctrlr; 2620 2621 /* Allocate memory for WRBQ */ 2622 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2623 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * 2624 phba->params.cxns_per_ctrl, 2625 GFP_KERNEL); 2626 if (!phwi_ctxt->be_wrbq) { 2627 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2628 "BM_%d : WRBQ Mem Alloc Failed\n"); 2629 return -ENOMEM; 2630 } 2631 2632 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2633 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2634 pwrb_context->pwrb_handle_base = 2635 kzalloc(sizeof(struct wrb_handle *) * 2636 phba->params.wrbs_per_cxn, GFP_KERNEL); 2637 if (!pwrb_context->pwrb_handle_base) { 2638 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2639 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2640 goto init_wrb_hndl_failed; 2641 } 2642 pwrb_context->pwrb_handle_basestd = 2643 kzalloc(sizeof(struct wrb_handle *) * 2644 phba->params.wrbs_per_cxn, GFP_KERNEL); 2645 if (!pwrb_context->pwrb_handle_basestd) { 2646 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2647 "BM_%d : Mem Alloc Failed. 
Failing to load\n"); 2648 goto init_wrb_hndl_failed; 2649 } 2650 if (!num_cxn_wrbh) { 2651 pwrb_handle = 2652 mem_descr_wrbh->mem_array[idx].virtual_address; 2653 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2654 ((sizeof(struct wrb_handle)) * 2655 phba->params.wrbs_per_cxn)); 2656 idx++; 2657 } 2658 pwrb_context->alloc_index = 0; 2659 pwrb_context->wrb_handles_available = 0; 2660 pwrb_context->free_index = 0; 2661 2662 if (num_cxn_wrbh) { 2663 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2664 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2665 pwrb_context->pwrb_handle_basestd[j] = 2666 pwrb_handle; 2667 pwrb_context->wrb_handles_available++; 2668 pwrb_handle->wrb_index = j; 2669 pwrb_handle++; 2670 } 2671 num_cxn_wrbh--; 2672 } 2673 spin_lock_init(&pwrb_context->wrb_lock); 2674 } 2675 idx = 0; 2676 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2677 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2678 if (!num_cxn_wrb) { 2679 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2680 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2681 ((sizeof(struct iscsi_wrb) * 2682 phba->params.wrbs_per_cxn)); 2683 idx++; 2684 } 2685 2686 if (num_cxn_wrb) { 2687 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2688 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2689 pwrb_handle->pwrb = pwrb; 2690 pwrb++; 2691 } 2692 num_cxn_wrb--; 2693 } 2694 } 2695 return 0; 2696 init_wrb_hndl_failed: 2697 for (j = index; j > 0; j--) { 2698 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2699 kfree(pwrb_context->pwrb_handle_base); 2700 kfree(pwrb_context->pwrb_handle_basestd); 2701 } 2702 return -ENOMEM; 2703 } 2704 2705 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2706 { 2707 uint8_t ulp_num; 2708 struct hwi_controller *phwi_ctrlr; 2709 struct hba_parameters *p = &phba->params; 2710 struct hd_async_context *pasync_ctx; 2711 struct hd_async_handle *pasync_header_h, *pasync_data_h; 2712 unsigned int index, idx, num_per_mem, num_async_data; 2713 struct be_mem_descriptor *mem_descr; 2714 2715 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2716 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2717 /* get async_ctx for each ULP */ 2718 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2719 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2720 (ulp_num * MEM_DESCR_OFFSET)); 2721 2722 phwi_ctrlr = phba->phwi_ctrlr; 2723 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2724 (struct hd_async_context *) 2725 mem_descr->mem_array[0].virtual_address; 2726 2727 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2728 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2729 2730 pasync_ctx->async_entry = 2731 (struct hd_async_entry *) 2732 ((long unsigned int)pasync_ctx + 2733 sizeof(struct hd_async_context)); 2734 2735 pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba, 2736 ulp_num); 2737 /* setup header buffers */ 2738 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2739 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2740 (ulp_num * MEM_DESCR_OFFSET); 2741 if (mem_descr->mem_array[0].virtual_address) { 2742 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2743 "BM_%d : hwi_init_async_pdu_ctx" 2744 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2745 ulp_num, 2746 mem_descr->mem_array[0]. 
2747 virtual_address); 2748 } else 2749 beiscsi_log(phba, KERN_WARNING, 2750 BEISCSI_LOG_INIT, 2751 "BM_%d : No Virtual address for ULP : %d\n", 2752 ulp_num); 2753 2754 pasync_ctx->async_header.pi = 0; 2755 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz; 2756 pasync_ctx->async_header.va_base = 2757 mem_descr->mem_array[0].virtual_address; 2758 2759 pasync_ctx->async_header.pa_base.u.a64.address = 2760 mem_descr->mem_array[0]. 2761 bus_address.u.a64.address; 2762 2763 /* setup header buffer sgls */ 2764 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2765 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2766 (ulp_num * MEM_DESCR_OFFSET); 2767 if (mem_descr->mem_array[0].virtual_address) { 2768 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2769 "BM_%d : hwi_init_async_pdu_ctx" 2770 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 2771 ulp_num, 2772 mem_descr->mem_array[0]. 2773 virtual_address); 2774 } else 2775 beiscsi_log(phba, KERN_WARNING, 2776 BEISCSI_LOG_INIT, 2777 "BM_%d : No Virtual address for ULP : %d\n", 2778 ulp_num); 2779 2780 pasync_ctx->async_header.ring_base = 2781 mem_descr->mem_array[0].virtual_address; 2782 2783 /* setup header buffer handles */ 2784 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2785 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2786 (ulp_num * MEM_DESCR_OFFSET); 2787 if (mem_descr->mem_array[0].virtual_address) { 2788 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2789 "BM_%d : hwi_init_async_pdu_ctx" 2790 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 2791 ulp_num, 2792 mem_descr->mem_array[0]. 2793 virtual_address); 2794 } else 2795 beiscsi_log(phba, KERN_WARNING, 2796 BEISCSI_LOG_INIT, 2797 "BM_%d : No Virtual address for ULP : %d\n", 2798 ulp_num); 2799 2800 pasync_ctx->async_header.handle_base = 2801 mem_descr->mem_array[0].virtual_address; 2802 2803 /* setup data buffer sgls */ 2804 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2805 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 2806 (ulp_num * MEM_DESCR_OFFSET); 2807 if (mem_descr->mem_array[0].virtual_address) { 2808 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2809 "BM_%d : hwi_init_async_pdu_ctx" 2810 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 2811 ulp_num, 2812 mem_descr->mem_array[0]. 
2813 virtual_address); 2814 } else 2815 beiscsi_log(phba, KERN_WARNING, 2816 BEISCSI_LOG_INIT, 2817 "BM_%d : No Virtual address for ULP : %d\n", 2818 ulp_num); 2819 2820 pasync_ctx->async_data.ring_base = 2821 mem_descr->mem_array[0].virtual_address; 2822 2823 /* setup data buffer handles */ 2824 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2825 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2826 (ulp_num * MEM_DESCR_OFFSET); 2827 if (!mem_descr->mem_array[0].virtual_address) 2828 beiscsi_log(phba, KERN_WARNING, 2829 BEISCSI_LOG_INIT, 2830 "BM_%d : No Virtual address for ULP : %d\n", 2831 ulp_num); 2832 2833 pasync_ctx->async_data.handle_base = 2834 mem_descr->mem_array[0].virtual_address; 2835 2836 pasync_header_h = 2837 (struct hd_async_handle *) 2838 pasync_ctx->async_header.handle_base; 2839 pasync_data_h = 2840 (struct hd_async_handle *) 2841 pasync_ctx->async_data.handle_base; 2842 2843 /* setup data buffers */ 2844 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2845 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2846 (ulp_num * MEM_DESCR_OFFSET); 2847 if (mem_descr->mem_array[0].virtual_address) { 2848 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2849 "BM_%d : hwi_init_async_pdu_ctx" 2850 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 2851 ulp_num, 2852 mem_descr->mem_array[0]. 2853 virtual_address); 2854 } else 2855 beiscsi_log(phba, KERN_WARNING, 2856 BEISCSI_LOG_INIT, 2857 "BM_%d : No Virtual address for ULP : %d\n", 2858 ulp_num); 2859 2860 idx = 0; 2861 pasync_ctx->async_data.pi = 0; 2862 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz; 2863 pasync_ctx->async_data.va_base = 2864 mem_descr->mem_array[idx].virtual_address; 2865 pasync_ctx->async_data.pa_base.u.a64.address = 2866 mem_descr->mem_array[idx]. 2867 bus_address.u.a64.address; 2868 2869 num_async_data = ((mem_descr->mem_array[idx].size) / 2870 phba->params.defpdu_data_sz); 2871 num_per_mem = 0; 2872 2873 for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE 2874 (phba, ulp_num); index++) { 2875 pasync_header_h->cri = -1; 2876 pasync_header_h->is_header = 1; 2877 pasync_header_h->index = index; 2878 INIT_LIST_HEAD(&pasync_header_h->link); 2879 pasync_header_h->pbuffer = 2880 (void *)((unsigned long) 2881 (pasync_ctx-> 2882 async_header.va_base) + 2883 (p->defpdu_hdr_sz * index)); 2884 2885 pasync_header_h->pa.u.a64.address = 2886 pasync_ctx->async_header.pa_base.u.a64. 2887 address + (p->defpdu_hdr_sz * index); 2888 2889 pasync_ctx->async_entry[index].header = 2890 pasync_header_h; 2891 pasync_header_h++; 2892 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2893 wq.list); 2894 2895 pasync_data_h->cri = -1; 2896 pasync_data_h->is_header = 0; 2897 pasync_data_h->index = index; 2898 INIT_LIST_HEAD(&pasync_data_h->link); 2899 2900 if (!num_async_data) { 2901 num_per_mem = 0; 2902 idx++; 2903 pasync_ctx->async_data.va_base = 2904 mem_descr->mem_array[idx]. 2905 virtual_address; 2906 pasync_ctx->async_data.pa_base.u. 2907 a64.address = 2908 mem_descr->mem_array[idx]. 2909 bus_address.u.a64.address; 2910 num_async_data = 2911 ((mem_descr->mem_array[idx]. 2912 size) / 2913 phba->params.defpdu_data_sz); 2914 } 2915 pasync_data_h->pbuffer = 2916 (void *)((unsigned long) 2917 (pasync_ctx->async_data.va_base) + 2918 (p->defpdu_data_sz * num_per_mem)); 2919 2920 pasync_data_h->pa.u.a64.address = 2921 pasync_ctx->async_data.pa_base.u.a64. 
2922 address + (p->defpdu_data_sz * 2923 num_per_mem); 2924 num_per_mem++; 2925 num_async_data--; 2926 2927 pasync_ctx->async_entry[index].data = 2928 pasync_data_h; 2929 pasync_data_h++; 2930 } 2931 } 2932 } 2933 2934 return 0; 2935 } 2936 2937 static int 2938 be_sgl_create_contiguous(void *virtual_address, 2939 u64 physical_address, u32 length, 2940 struct be_dma_mem *sgl) 2941 { 2942 WARN_ON(!virtual_address); 2943 WARN_ON(!physical_address); 2944 WARN_ON(!length); 2945 WARN_ON(!sgl); 2946 2947 sgl->va = virtual_address; 2948 sgl->dma = (unsigned long)physical_address; 2949 sgl->size = length; 2950 2951 return 0; 2952 } 2953 2954 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 2955 { 2956 memset(sgl, 0, sizeof(*sgl)); 2957 } 2958 2959 static void 2960 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 2961 struct mem_array *pmem, struct be_dma_mem *sgl) 2962 { 2963 if (sgl->va) 2964 be_sgl_destroy_contiguous(sgl); 2965 2966 be_sgl_create_contiguous(pmem->virtual_address, 2967 pmem->bus_address.u.a64.address, 2968 pmem->size, sgl); 2969 } 2970 2971 static void 2972 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 2973 struct mem_array *pmem, struct be_dma_mem *sgl) 2974 { 2975 if (sgl->va) 2976 be_sgl_destroy_contiguous(sgl); 2977 2978 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 2979 pmem->bus_address.u.a64.address, 2980 pmem->size, sgl); 2981 } 2982 2983 static int be_fill_queue(struct be_queue_info *q, 2984 u16 len, u16 entry_size, void *vaddress) 2985 { 2986 struct be_dma_mem *mem = &q->dma_mem; 2987 2988 memset(q, 0, sizeof(*q)); 2989 q->len = len; 2990 q->entry_size = entry_size; 2991 mem->size = len * entry_size; 2992 mem->va = vaddress; 2993 if (!mem->va) 2994 return -ENOMEM; 2995 memset(mem->va, 0, mem->size); 2996 return 0; 2997 } 2998 2999 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3000 struct hwi_context_memory *phwi_context) 3001 { 3002 int ret = -ENOMEM, eq_for_mcc; 3003 unsigned int i, num_eq_pages; 3004 struct be_queue_info *eq; 3005 struct be_dma_mem *mem; 3006 void *eq_vaddress; 3007 dma_addr_t paddr; 3008 3009 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ 3010 sizeof(struct be_eq_entry)); 3011 3012 if (phba->pcidev->msix_enabled) 3013 eq_for_mcc = 1; 3014 else 3015 eq_for_mcc = 0; 3016 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3017 eq = &phwi_context->be_eq[i].q; 3018 mem = &eq->dma_mem; 3019 phwi_context->be_eq[i].phba = phba; 3020 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3021 num_eq_pages * PAGE_SIZE, 3022 &paddr); 3023 if (!eq_vaddress) { 3024 ret = -ENOMEM; 3025 goto create_eq_error; 3026 } 3027 3028 mem->va = eq_vaddress; 3029 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3030 sizeof(struct be_eq_entry), eq_vaddress); 3031 if (ret) { 3032 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3033 "BM_%d : be_fill_queue Failed for EQ\n"); 3034 goto create_eq_error; 3035 } 3036 3037 mem->dma = paddr; 3038 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3039 BEISCSI_EQ_DELAY_DEF); 3040 if (ret) { 3041 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3042 "BM_%d : beiscsi_cmd_eq_create" 3043 "Failed for EQ\n"); 3044 goto create_eq_error; 3045 } 3046 3047 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3048 "BM_%d : eqid = %d\n", 3049 phwi_context->be_eq[i].q.id); 3050 } 3051 return 0; 3052 3053 create_eq_error: 3054 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3055 eq = &phwi_context->be_eq[i].q; 3056 mem = &eq->dma_mem; 3057 if (mem->va) 3058 pci_free_consistent(phba->pcidev, 
num_eq_pages 3059 * PAGE_SIZE, 3060 mem->va, mem->dma); 3061 } 3062 return ret; 3063 } 3064 3065 static int beiscsi_create_cqs(struct beiscsi_hba *phba, 3066 struct hwi_context_memory *phwi_context) 3067 { 3068 unsigned int i, num_cq_pages; 3069 struct be_queue_info *cq, *eq; 3070 struct be_dma_mem *mem; 3071 struct be_eq_obj *pbe_eq; 3072 void *cq_vaddress; 3073 int ret = -ENOMEM; 3074 dma_addr_t paddr; 3075 3076 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 3077 sizeof(struct sol_cqe)); 3078 3079 for (i = 0; i < phba->num_cpus; i++) { 3080 cq = &phwi_context->be_cq[i]; 3081 eq = &phwi_context->be_eq[i].q; 3082 pbe_eq = &phwi_context->be_eq[i]; 3083 pbe_eq->cq = cq; 3084 pbe_eq->phba = phba; 3085 mem = &cq->dma_mem; 3086 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3087 num_cq_pages * PAGE_SIZE, 3088 &paddr); 3089 if (!cq_vaddress) { 3090 ret = -ENOMEM; 3091 goto create_cq_error; 3092 } 3093 3094 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3095 sizeof(struct sol_cqe), cq_vaddress); 3096 if (ret) { 3097 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3098 "BM_%d : be_fill_queue Failed " 3099 "for ISCSI CQ\n"); 3100 goto create_cq_error; 3101 } 3102 3103 mem->dma = paddr; 3104 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 3105 false, 0); 3106 if (ret) { 3107 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3108 "BM_%d : beiscsi_cmd_eq_create" 3109 "Failed for ISCSI CQ\n"); 3110 goto create_cq_error; 3111 } 3112 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3113 "BM_%d : iscsi cq_id is %d for eq_id %d\n" 3114 "iSCSI CQ CREATED\n", cq->id, eq->id); 3115 } 3116 return 0; 3117 3118 create_cq_error: 3119 for (i = 0; i < phba->num_cpus; i++) { 3120 cq = &phwi_context->be_cq[i]; 3121 mem = &cq->dma_mem; 3122 if (mem->va) 3123 pci_free_consistent(phba->pcidev, num_cq_pages 3124 * PAGE_SIZE, 3125 mem->va, mem->dma); 3126 } 3127 return ret; 3128 } 3129 3130 static int 3131 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3132 struct hwi_context_memory *phwi_context, 3133 struct hwi_controller *phwi_ctrlr, 3134 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3135 { 3136 unsigned int idx; 3137 int ret; 3138 struct be_queue_info *dq, *cq; 3139 struct be_dma_mem *mem; 3140 struct be_mem_descriptor *mem_descr; 3141 void *dq_vaddress; 3142 3143 idx = 0; 3144 dq = &phwi_context->be_def_hdrq[ulp_num]; 3145 cq = &phwi_context->be_cq[0]; 3146 mem = &dq->dma_mem; 3147 mem_descr = phba->init_mem; 3148 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 3149 (ulp_num * MEM_DESCR_OFFSET); 3150 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3151 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3152 sizeof(struct phys_addr), 3153 sizeof(struct phys_addr), dq_vaddress); 3154 if (ret) { 3155 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3156 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", 3157 ulp_num); 3158 3159 return ret; 3160 } 3161 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
3162 bus_address.u.a64.address; 3163 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3164 def_pdu_ring_sz, 3165 phba->params.defpdu_hdr_sz, 3166 BEISCSI_DEFQ_HDR, ulp_num); 3167 if (ret) { 3168 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3169 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3170 ulp_num); 3171 3172 return ret; 3173 } 3174 3175 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3176 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3177 ulp_num, 3178 phwi_context->be_def_hdrq[ulp_num].id); 3179 return 0; 3180 } 3181 3182 static int 3183 beiscsi_create_def_data(struct beiscsi_hba *phba, 3184 struct hwi_context_memory *phwi_context, 3185 struct hwi_controller *phwi_ctrlr, 3186 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3187 { 3188 unsigned int idx; 3189 int ret; 3190 struct be_queue_info *dataq, *cq; 3191 struct be_dma_mem *mem; 3192 struct be_mem_descriptor *mem_descr; 3193 void *dq_vaddress; 3194 3195 idx = 0; 3196 dataq = &phwi_context->be_def_dataq[ulp_num]; 3197 cq = &phwi_context->be_cq[0]; 3198 mem = &dataq->dma_mem; 3199 mem_descr = phba->init_mem; 3200 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3201 (ulp_num * MEM_DESCR_OFFSET); 3202 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3203 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3204 sizeof(struct phys_addr), 3205 sizeof(struct phys_addr), dq_vaddress); 3206 if (ret) { 3207 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3208 "BM_%d : be_fill_queue Failed for DEF PDU " 3209 "DATA on ULP : %d\n", 3210 ulp_num); 3211 3212 return ret; 3213 } 3214 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3215 bus_address.u.a64.address; 3216 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3217 def_pdu_ring_sz, 3218 phba->params.defpdu_data_sz, 3219 BEISCSI_DEFQ_DATA, ulp_num); 3220 if (ret) { 3221 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3222 "BM_%d be_cmd_create_default_pdu_queue" 3223 " Failed for DEF PDU DATA on ULP : %d\n", 3224 ulp_num); 3225 return ret; 3226 } 3227 3228 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3229 "BM_%d : iscsi def data id on ULP : %d is %d\n", 3230 ulp_num, 3231 phwi_context->be_def_dataq[ulp_num].id); 3232 3233 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3234 "BM_%d : DEFAULT PDU DATA RING CREATED" 3235 "on ULP : %d\n", ulp_num); 3236 return 0; 3237 } 3238 3239 3240 static int 3241 beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3242 { 3243 struct be_mem_descriptor *mem_descr; 3244 struct mem_array *pm_arr; 3245 struct be_dma_mem sgl; 3246 int status, ulp_num; 3247 3248 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3249 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3250 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3251 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3252 (ulp_num * MEM_DESCR_OFFSET); 3253 pm_arr = mem_descr->mem_array; 3254 3255 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3256 status = be_cmd_iscsi_post_template_hdr( 3257 &phba->ctrl, &sgl); 3258 3259 if (status != 0) { 3260 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3261 "BM_%d : Post Template HDR Failed for" 3262 "ULP_%d\n", ulp_num); 3263 return status; 3264 } 3265 3266 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3267 "BM_%d : Template HDR Pages Posted for" 3268 "ULP_%d\n", ulp_num); 3269 } 3270 } 3271 return 0; 3272 } 3273 3274 static int 3275 beiscsi_post_pages(struct beiscsi_hba *phba) 3276 { 3277 struct be_mem_descriptor *mem_descr; 3278 struct mem_array *pm_arr; 3279 unsigned int page_offset, i; 3280 
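	/*
	 * Illustrative sizing only (the numbers are examples, not read
	 * from firmware): with num_sge_per_io = 32 and an ICD start of
	 * 64, the page_offset computed below is
	 *   (sizeof(struct iscsi_sge) * 32 * 64) / PAGE_SIZE
	 * so posting resumes at this function's ICD range instead of at
	 * page 0 of the adapter's SGL area.
	 */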
struct be_dma_mem sgl; 3281 int status, ulp_num = 0; 3282 3283 mem_descr = phba->init_mem; 3284 mem_descr += HWI_MEM_SGE; 3285 pm_arr = mem_descr->mem_array; 3286 3287 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3288 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3289 break; 3290 3291 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3292 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3293 for (i = 0; i < mem_descr->num_elements; i++) { 3294 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3295 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3296 page_offset, 3297 (pm_arr->size / PAGE_SIZE)); 3298 page_offset += pm_arr->size / PAGE_SIZE; 3299 if (status != 0) { 3300 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3301 "BM_%d : post sgl failed.\n"); 3302 return status; 3303 } 3304 pm_arr++; 3305 } 3306 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3307 "BM_%d : POSTED PAGES\n"); 3308 return 0; 3309 } 3310 3311 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3312 { 3313 struct be_dma_mem *mem = &q->dma_mem; 3314 if (mem->va) { 3315 pci_free_consistent(phba->pcidev, mem->size, 3316 mem->va, mem->dma); 3317 mem->va = NULL; 3318 } 3319 } 3320 3321 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3322 u16 len, u16 entry_size) 3323 { 3324 struct be_dma_mem *mem = &q->dma_mem; 3325 3326 memset(q, 0, sizeof(*q)); 3327 q->len = len; 3328 q->entry_size = entry_size; 3329 mem->size = len * entry_size; 3330 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); 3331 if (!mem->va) 3332 return -ENOMEM; 3333 return 0; 3334 } 3335 3336 static int 3337 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3338 struct hwi_context_memory *phwi_context, 3339 struct hwi_controller *phwi_ctrlr) 3340 { 3341 unsigned int num_wrb_rings; 3342 u64 pa_addr_lo; 3343 unsigned int idx, num, i, ulp_num; 3344 struct mem_array *pwrb_arr; 3345 void *wrb_vaddr; 3346 struct be_dma_mem sgl; 3347 struct be_mem_descriptor *mem_descr; 3348 struct hwi_wrb_context *pwrb_context; 3349 int status; 3350 uint8_t ulp_count = 0, ulp_base_num = 0; 3351 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3352 3353 idx = 0; 3354 mem_descr = phba->init_mem; 3355 mem_descr += HWI_MEM_WRB; 3356 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl, 3357 GFP_KERNEL); 3358 if (!pwrb_arr) { 3359 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3360 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3361 return -ENOMEM; 3362 } 3363 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3364 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3365 num_wrb_rings = mem_descr->mem_array[idx].size / 3366 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3367 3368 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3369 if (num_wrb_rings) { 3370 pwrb_arr[num].virtual_address = wrb_vaddr; 3371 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3372 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3373 sizeof(struct iscsi_wrb); 3374 wrb_vaddr += pwrb_arr[num].size; 3375 pa_addr_lo += pwrb_arr[num].size; 3376 num_wrb_rings--; 3377 } else { 3378 idx++; 3379 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3380 pa_addr_lo = mem_descr->mem_array[idx].\ 3381 bus_address.u.a64.address; 3382 num_wrb_rings = mem_descr->mem_array[idx].size / 3383 (phba->params.wrbs_per_cxn * 3384 sizeof(struct iscsi_wrb)); 3385 pwrb_arr[num].virtual_address = wrb_vaddr; 3386 pwrb_arr[num].bus_address.u.a64.address\ 3387 
= pa_addr_lo; 3388 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3389 sizeof(struct iscsi_wrb); 3390 wrb_vaddr += pwrb_arr[num].size; 3391 pa_addr_lo += pwrb_arr[num].size; 3392 num_wrb_rings--; 3393 } 3394 } 3395 3396 /* Get the ULP Count */ 3397 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3398 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3399 ulp_count++; 3400 ulp_base_num = ulp_num; 3401 cid_count_ulp[ulp_num] = 3402 BEISCSI_GET_CID_COUNT(phba, ulp_num); 3403 } 3404 3405 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3406 if (ulp_count > 1) { 3407 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3408 3409 if (!cid_count_ulp[ulp_base_num]) 3410 ulp_base_num = (ulp_base_num + 1) % 3411 BEISCSI_ULP_COUNT; 3412 3413 cid_count_ulp[ulp_base_num]--; 3414 } 3415 3416 3417 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3418 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3419 &phwi_context->be_wrbq[i], 3420 &phwi_ctrlr->wrb_context[i], 3421 ulp_base_num); 3422 if (status != 0) { 3423 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3424 "BM_%d : wrbq create failed."); 3425 kfree(pwrb_arr); 3426 return status; 3427 } 3428 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3429 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3430 } 3431 kfree(pwrb_arr); 3432 return 0; 3433 } 3434 3435 static void free_wrb_handles(struct beiscsi_hba *phba) 3436 { 3437 unsigned int index; 3438 struct hwi_controller *phwi_ctrlr; 3439 struct hwi_wrb_context *pwrb_context; 3440 3441 phwi_ctrlr = phba->phwi_ctrlr; 3442 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 3443 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3444 kfree(pwrb_context->pwrb_handle_base); 3445 kfree(pwrb_context->pwrb_handle_basestd); 3446 } 3447 } 3448 3449 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3450 { 3451 struct be_ctrl_info *ctrl = &phba->ctrl; 3452 struct be_dma_mem *ptag_mem; 3453 struct be_queue_info *q; 3454 int i, tag; 3455 3456 q = &phba->ctrl.mcc_obj.q; 3457 for (i = 0; i < MAX_MCC_CMD; i++) { 3458 tag = i + 1; 3459 if (!test_bit(MCC_TAG_STATE_RUNNING, 3460 &ctrl->ptag_state[tag].tag_state)) 3461 continue; 3462 3463 if (test_bit(MCC_TAG_STATE_TIMEOUT, 3464 &ctrl->ptag_state[tag].tag_state)) { 3465 ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; 3466 if (ptag_mem->size) { 3467 pci_free_consistent(ctrl->pdev, 3468 ptag_mem->size, 3469 ptag_mem->va, 3470 ptag_mem->dma); 3471 ptag_mem->size = 0; 3472 } 3473 continue; 3474 } 3475 /** 3476 * If MCC is still active and waiting then wake up the process. 3477 * We are here only because port is going offline. The process 3478 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is 3479 * returned for the operation and allocated memory cleaned up. 3480 */ 3481 if (waitqueue_active(&ctrl->mcc_wait[tag])) { 3482 ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED; 3483 ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK; 3484 wake_up_interruptible(&ctrl->mcc_wait[tag]); 3485 /* 3486 * Control tag info gets reinitialized in enable 3487 * so wait for the process to clear running state. 3488 */ 3489 while (test_bit(MCC_TAG_STATE_RUNNING, 3490 &ctrl->ptag_state[tag].tag_state)) 3491 schedule_timeout_uninterruptible(HZ); 3492 } 3493 /** 3494 * For MCC with tag_states MCC_TAG_STATE_ASYNC and 3495 * MCC_TAG_STATE_IGNORE nothing needs to done. 
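 * (As the names suggest, an ASYNC tag is completed entirely from the
 * MCC completion path and an IGNORE tag has no waiter, so neither
 * holds memory or a sleeping process at this point.)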
 */
	}
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
		be_queue_free(phba, q);
	}

	q = &phba->ctrl.mcc_obj.cq;
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		be_queue_free(phba, q);
	}
}

static int be_mcc_queues_create(struct beiscsi_hba *phba,
				struct hwi_context_memory *phwi_context)
{
	struct be_queue_info *q, *cq;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* Alloc MCC compl queue */
	cq = &phba->ctrl.mcc_obj.cq;
	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;
	/* Ask BE to create MCC compl queue */
	if (phba->pcidev->msix_enabled) {
		if (beiscsi_cmd_cq_create(ctrl, cq,
				&phwi_context->be_eq[phba->num_cpus].q,
				false, true, 0))
			goto mcc_cq_free;
	} else {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
					  false, true, 0))
			goto mcc_cq_free;
	}

	/* Alloc MCC queue */
	q = &phba->ctrl.mcc_obj.q;
	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (beiscsi_cmd_mccq_create(phba, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(phba, q);
mcc_cq_destroy:
	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(phba, cq);
err:
	return -ENOMEM;
}

static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
{
	int nvec = 1;

	switch (phba->generation) {
	case BE_GEN2:
	case BE_GEN3:
		nvec = BEISCSI_MAX_NUM_CPUS + 1;
		break;
	case BE_GEN4:
		nvec = phba->fw_config.eqid_count;
		break;
	default:
		nvec = 2;
		break;
	}

	/* if eqid_count == 1 fall back to INTX */
	if (enable_msix && nvec > 1) {
		const struct irq_affinity desc = { .post_vectors = 1 };

		/* on success keep one vector for MCC, the rest for I/O */
		if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) >= 0) {
			phba->num_cpus = nvec - 1;
			return;
		}
	}

	phba->num_cpus = 1;
}

static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	if (beiscsi_hba_in_error(phba))
		return;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->pcidev->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
		       & EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
	}
}

static void hwi_cleanup_port(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i, eq_for_mcc, ulp_num;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num,
&phba->fw_config.ulp_supported)) 3631 beiscsi_cmd_iscsi_cleanup(phba, ulp_num); 3632 3633 /** 3634 * Purge all EQ entries that may have been left out. This is to 3635 * workaround a problem we've seen occasionally where driver gets an 3636 * interrupt with EQ entry bit set after stopping the controller. 3637 */ 3638 hwi_purge_eq(phba); 3639 3640 phwi_ctrlr = phba->phwi_ctrlr; 3641 phwi_context = phwi_ctrlr->phwi_ctxt; 3642 3643 be_cmd_iscsi_remove_template_hdr(ctrl); 3644 3645 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3646 q = &phwi_context->be_wrbq[i]; 3647 if (q->created) 3648 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3649 } 3650 kfree(phwi_context->be_wrbq); 3651 free_wrb_handles(phba); 3652 3653 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3654 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3655 3656 q = &phwi_context->be_def_hdrq[ulp_num]; 3657 if (q->created) 3658 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3659 3660 q = &phwi_context->be_def_dataq[ulp_num]; 3661 if (q->created) 3662 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3663 } 3664 } 3665 3666 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3667 3668 for (i = 0; i < (phba->num_cpus); i++) { 3669 q = &phwi_context->be_cq[i]; 3670 if (q->created) { 3671 be_queue_free(phba, q); 3672 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3673 } 3674 } 3675 3676 be_mcc_queues_destroy(phba); 3677 if (phba->pcidev->msix_enabled) 3678 eq_for_mcc = 1; 3679 else 3680 eq_for_mcc = 0; 3681 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3682 q = &phwi_context->be_eq[i].q; 3683 if (q->created) { 3684 be_queue_free(phba, q); 3685 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3686 } 3687 } 3688 /* this ensures complete FW cleanup */ 3689 beiscsi_cmd_function_reset(phba); 3690 /* last communication, indicate driver is unloading */ 3691 beiscsi_cmd_special_wrb(&phba->ctrl, 0); 3692 } 3693 3694 static int hwi_init_port(struct beiscsi_hba *phba) 3695 { 3696 struct hwi_controller *phwi_ctrlr; 3697 struct hwi_context_memory *phwi_context; 3698 unsigned int def_pdu_ring_sz; 3699 struct be_ctrl_info *ctrl = &phba->ctrl; 3700 int status, ulp_num; 3701 u16 nbufs; 3702 3703 phwi_ctrlr = phba->phwi_ctrlr; 3704 phwi_context = phwi_ctrlr->phwi_ctxt; 3705 /* set port optic state to unknown */ 3706 phba->optic_state = 0xff; 3707 3708 status = beiscsi_create_eqs(phba, phwi_context); 3709 if (status != 0) { 3710 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3711 "BM_%d : EQ not created\n"); 3712 goto error; 3713 } 3714 3715 status = be_mcc_queues_create(phba, phwi_context); 3716 if (status != 0) 3717 goto error; 3718 3719 status = beiscsi_check_supported_fw(ctrl, phba); 3720 if (status != 0) { 3721 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3722 "BM_%d : Unsupported fw version\n"); 3723 goto error; 3724 } 3725 3726 status = beiscsi_create_cqs(phba, phwi_context); 3727 if (status != 0) { 3728 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3729 "BM_%d : CQ not created\n"); 3730 goto error; 3731 } 3732 3733 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3734 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3735 nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries; 3736 def_pdu_ring_sz = nbufs * sizeof(struct phys_addr); 3737 3738 status = beiscsi_create_def_hdr(phba, phwi_context, 3739 phwi_ctrlr, 3740 def_pdu_ring_sz, 3741 ulp_num); 3742 if (status != 0) { 3743 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3744 "BM_%d : Default Header not created for ULP : %d\n", 3745 ulp_num); 3746 goto error; 3747 } 3748 3749 
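			/*
			 * The data ring mirrors the header ring's
			 * geometry: def_pdu_ring_sz above is one
			 * struct phys_addr slot per async handle.
			 */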
3749 status = beiscsi_create_def_data(phba, phwi_context, 3750 phwi_ctrlr, 3751 def_pdu_ring_sz, 3752 ulp_num); 3753 if (status != 0) { 3754 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3755 "BM_%d : Default Data not created for ULP : %d\n", 3756 ulp_num); 3757 goto error; 3758 } 3759 /** 3760 * Now that the default PDU rings have been created, 3761 * let EP know about it. 3762 */ 3763 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, 3764 ulp_num, nbufs); 3765 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, 3766 ulp_num, nbufs); 3767 } 3768 } 3769 3770 status = beiscsi_post_pages(phba); 3771 if (status != 0) { 3772 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3773 "BM_%d : Post SGL Pages Failed\n"); 3774 goto error; 3775 } 3776 3777 status = beiscsi_post_template_hdr(phba); 3778 if (status != 0) { 3779 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3780 "BM_%d : Template HDR Posting for CXN Failed\n"); 3781 } 3782 3783 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3784 if (status != 0) { 3785 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3786 "BM_%d : WRB Rings not created\n"); 3787 goto error; 3788 } 3789 3790 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3791 uint16_t async_arr_idx = 0; 3792 3793 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3794 uint16_t cri = 0; 3795 struct hd_async_context *pasync_ctx; 3796 3797 pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3798 phwi_ctrlr, ulp_num); 3799 for (cri = 0; cri < 3800 phba->params.cxns_per_ctrl; cri++) { 3801 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI 3802 (phwi_ctrlr, cri)) 3803 pasync_ctx->cid_to_async_cri_map[ 3804 phwi_ctrlr->wrb_context[cri].cid] = 3805 async_arr_idx++; 3806 } 3807 } 3808 } 3809 3810 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3811 "BM_%d : hwi_init_port success\n"); 3812 return 0; 3813 3814 error: 3815 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3816 "BM_%d : hwi_init_port failed\n"); 3817 hwi_cleanup_port(phba); 3818 return status; 3819 } 3820 3821 static int hwi_init_controller(struct beiscsi_hba *phba) 3822 { 3823 struct hwi_controller *phwi_ctrlr; 3824 3825 phwi_ctrlr = phba->phwi_ctrlr; 3826 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3827 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3828 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3829 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3830 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3831 phwi_ctrlr->phwi_ctxt); 3832 } else { 3833 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3834 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3835 "than one element. Failing to load\n"); 3836 return -ENOMEM; 3837 } 3838 3839 iscsi_init_global_templates(phba); 3840 if (beiscsi_init_wrb_handle(phba)) 3841 return -ENOMEM; 3842 3843 if (hwi_init_async_pdu_ctx(phba)) { 3844 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3845 "BM_%d : hwi_init_async_pdu_ctx failed\n"); 3846 return -ENOMEM; 3847 } 3848 3849 if (hwi_init_port(phba) != 0) { 3850 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3851 "BM_%d : hwi_init_controller failed\n"); 3852 3853 return -ENOMEM; 3854 } 3855 return 0; 3856 } 3857 3858 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3859 { 3860 struct be_mem_descriptor *mem_descr; 3861 int i, j; 3862 3863 mem_descr = phba->init_mem; 3864 i = 0; 3865 j = 0; 3866 for (i = 0; i < SE_MEM_MAX; i++) { 3867 for (j = mem_descr->num_elements; j > 0; j--) { 3868 pci_free_consistent(phba->pcidev, 3869 mem_descr->mem_array[j - 1].size, 3870 mem_descr->mem_array[j - 1].virtual_address, 3871 (unsigned
long)mem_descr->mem_array[j - 1]. 3872 bus_address.u.a64.address); 3873 } 3874 3875 kfree(mem_descr->mem_array); 3876 mem_descr++; 3877 } 3878 kfree(phba->init_mem); 3879 kfree(phba->phwi_ctrlr->wrb_context); 3880 kfree(phba->phwi_ctrlr); 3881 } 3882 3883 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3884 { 3885 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3886 struct sgl_handle *psgl_handle; 3887 struct iscsi_sge *pfrag; 3888 unsigned int arr_index, i, idx; 3889 unsigned int ulp_icd_start, ulp_num = 0; 3890 3891 phba->io_sgl_hndl_avbl = 0; 3892 phba->eh_sgl_hndl_avbl = 0; 3893 3894 mem_descr_sglh = phba->init_mem; 3895 mem_descr_sglh += HWI_MEM_SGLH; 3896 if (1 == mem_descr_sglh->num_elements) { 3897 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3898 phba->params.ios_per_ctrl, 3899 GFP_KERNEL); 3900 if (!phba->io_sgl_hndl_base) { 3901 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3902 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3903 return -ENOMEM; 3904 } 3905 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3906 (phba->params.icds_per_ctrl - 3907 phba->params.ios_per_ctrl), 3908 GFP_KERNEL); 3909 if (!phba->eh_sgl_hndl_base) { 3910 kfree(phba->io_sgl_hndl_base); 3911 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3912 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3913 return -ENOMEM; 3914 } 3915 } else { 3916 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3917 "BM_%d : HWI_MEM_SGLH is more than one element. " 3918 "Failing to load\n"); 3919 return -ENOMEM; 3920 } 3921 3922 arr_index = 0; 3923 idx = 0; 3924 while (idx < mem_descr_sglh->num_elements) { 3925 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3926 3927 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3928 sizeof(struct sgl_handle)); i++) { 3929 if (arr_index < phba->params.ios_per_ctrl) { 3930 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3931 phba->io_sgl_hndl_avbl++; 3932 arr_index++; 3933 } else { 3934 phba->eh_sgl_hndl_base[arr_index - 3935 phba->params.ios_per_ctrl] = 3936 psgl_handle; 3937 arr_index++; 3938 phba->eh_sgl_hndl_avbl++; 3939 } 3940 psgl_handle++; 3941 } 3942 idx++; 3943 } 3944 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3945 "BM_%d : phba->io_sgl_hndl_avbl=%d " 3946 "phba->eh_sgl_hndl_avbl=%d\n", 3947 phba->io_sgl_hndl_avbl, 3948 phba->eh_sgl_hndl_avbl); 3949 3950 mem_descr_sg = phba->init_mem; 3951 mem_descr_sg += HWI_MEM_SGE; 3952 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3953 "BM_%d : mem_descr_sg->num_elements=%d\n", 3954 mem_descr_sg->num_elements); 3955 3956 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3957 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3958 break; 3959 3960 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 3961 3962 arr_index = 0; 3963 idx = 0; 3964 while (idx < mem_descr_sg->num_elements) { 3965 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 3966 3967 for (i = 0; 3968 i < (mem_descr_sg->mem_array[idx].size) / 3969 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 3970 i++) { 3971 if (arr_index < phba->params.ios_per_ctrl) 3972 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 3973 else 3974 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 3975 phba->params.ios_per_ctrl]; 3976 psgl_handle->pfrag = pfrag; 3977 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 3978 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3979 pfrag += phba->params.num_sge_per_io; 3980 psgl_handle->sgl_index = ulp_icd_start + arr_index++; 3981 } 3982 idx++; 3983 }
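/* Every SGL handle now owns its SGE fragment and an ICD index offset from ulp_icd_start; the alloc/free cursors for both pools are reset below. */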
3984 phba->io_sgl_free_index = 0; 3985 phba->io_sgl_alloc_index = 0; 3986 phba->eh_sgl_free_index = 0; 3987 phba->eh_sgl_alloc_index = 0; 3988 return 0; 3989 } 3990 3991 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 3992 { 3993 int ret; 3994 uint16_t i, ulp_num; 3995 struct ulp_cid_info *ptr_cid_info = NULL; 3996 3997 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3998 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 3999 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), 4000 GFP_KERNEL); 4001 4002 if (!ptr_cid_info) { 4003 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4004 "BM_%d : Failed to allocate memory " 4005 "for ULP_CID_INFO for ULP : %d\n", 4006 ulp_num); 4007 ret = -ENOMEM; 4008 goto free_memory; 4009 4010 } 4011 4012 /* Allocate memory for CID array */ 4013 ptr_cid_info->cid_array = 4014 kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num), 4015 sizeof(*ptr_cid_info->cid_array), 4016 GFP_KERNEL); 4017 if (!ptr_cid_info->cid_array) { 4018 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4019 "BM_%d : Failed to allocate memory " 4020 "for CID_ARRAY for ULP : %d\n", 4021 ulp_num); 4022 kfree(ptr_cid_info); 4023 ptr_cid_info = NULL; 4024 ret = -ENOMEM; 4025 4026 goto free_memory; 4027 } 4028 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( 4029 phba, ulp_num); 4030 4031 /* Save the cid_info_array ptr */ 4032 phba->cid_array_info[ulp_num] = ptr_cid_info; 4033 } 4034 } 4035 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 4036 phba->params.cxns_per_ctrl, GFP_KERNEL); 4037 if (!phba->ep_array) { 4038 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4039 "BM_%d : Failed to allocate memory in " 4040 "hba_setup_cid_tbls\n"); 4041 ret = -ENOMEM; 4042 4043 goto free_memory; 4044 } 4045 4046 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * 4047 phba->params.cxns_per_ctrl, GFP_KERNEL); 4048 if (!phba->conn_table) { 4049 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4050 "BM_%d : Failed to allocate memory in " 4051 "hba_setup_cid_tbls\n"); 4052 4053 kfree(phba->ep_array); 4054 phba->ep_array = NULL; 4055 ret = -ENOMEM; 4056 4057 goto free_memory; 4058 } 4059 4060 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4061 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4062 4063 ptr_cid_info = phba->cid_array_info[ulp_num]; 4064 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 4065 phba->phwi_ctrlr->wrb_context[i].cid; 4066 4067 } 4068 4069 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4070 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4071 ptr_cid_info = phba->cid_array_info[ulp_num]; 4072 4073 ptr_cid_info->cid_alloc = 0; 4074 ptr_cid_info->cid_free = 0; 4075 } 4076 } 4077 return 0; 4078 4079 free_memory: 4080 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4081 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4082 ptr_cid_info = phba->cid_array_info[ulp_num]; 4083 4084 if (ptr_cid_info) { 4085 kfree(ptr_cid_info->cid_array); 4086 kfree(ptr_cid_info); 4087 phba->cid_array_info[ulp_num] = NULL; 4088 } 4089 } 4090 } 4091 4092 return ret; 4093 } 4094 4095 static void hwi_enable_intr(struct beiscsi_hba *phba) 4096 { 4097 struct be_ctrl_info *ctrl = &phba->ctrl; 4098 struct hwi_controller *phwi_ctrlr; 4099 struct hwi_context_memory *phwi_context; 4100 struct be_queue_info *eq; 4101 u8 __iomem *addr; 4102 u32 reg, i; 4103 u32 enabled; 4104 4105 phwi_ctrlr = phba->phwi_ctrlr; 4106 phwi_context = phwi_ctrlr->phwi_ctxt; 4107 4108 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 4109
PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 4110 reg = ioread32(addr); 4111 4112 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4113 if (!enabled) { 4114 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4115 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4116 "BM_%d : reg =0x%08x addr=%p\n", reg, addr); 4117 iowrite32(reg, addr); 4118 } 4119 4120 if (!phba->pcidev->msix_enabled) { 4121 eq = &phwi_context->be_eq[0].q; 4122 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4123 "BM_%d : eq->id=%d\n", eq->id); 4124 4125 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4126 } else { 4127 for (i = 0; i <= phba->num_cpus; i++) { 4128 eq = &phwi_context->be_eq[i].q; 4129 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4130 "BM_%d : eq->id=%d\n", eq->id); 4131 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4132 } 4133 } 4134 } 4135 4136 static void hwi_disable_intr(struct beiscsi_hba *phba) 4137 { 4138 struct be_ctrl_info *ctrl = &phba->ctrl; 4139 4140 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 4141 u32 reg = ioread32(addr); 4142 4143 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4144 if (enabled) { 4145 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4146 iowrite32(reg, addr); 4147 } else 4148 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4149 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 4150 } 4151 4152 static int beiscsi_init_port(struct beiscsi_hba *phba) 4153 { 4154 int ret; 4155 4156 ret = hwi_init_controller(phba); 4157 if (ret < 0) { 4158 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4159 "BM_%d : init controller failed\n"); 4160 return ret; 4161 } 4162 ret = beiscsi_init_sgl_handle(phba); 4163 if (ret < 0) { 4164 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4165 "BM_%d : init sgl handles failed\n"); 4166 goto cleanup_port; 4167 } 4168 4169 ret = hba_setup_cid_tbls(phba); 4170 if (ret < 0) { 4171 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4172 "BM_%d : setup CID table failed\n"); 4173 kfree(phba->io_sgl_hndl_base); 4174 kfree(phba->eh_sgl_hndl_base); 4175 goto cleanup_port; 4176 } 4177 return ret; 4178 4179 cleanup_port: 4180 hwi_cleanup_port(phba); 4181 return ret; 4182 } 4183 4184 static void beiscsi_cleanup_port(struct beiscsi_hba *phba) 4185 { 4186 struct ulp_cid_info *ptr_cid_info = NULL; 4187 int ulp_num; 4188 4189 kfree(phba->io_sgl_hndl_base); 4190 kfree(phba->eh_sgl_hndl_base); 4191 kfree(phba->ep_array); 4192 kfree(phba->conn_table); 4193 4194 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4195 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4196 ptr_cid_info = phba->cid_array_info[ulp_num]; 4197 4198 if (ptr_cid_info) { 4199 kfree(ptr_cid_info->cid_array); 4200 kfree(ptr_cid_info); 4201 phba->cid_array_info[ulp_num] = NULL; 4202 } 4203 } 4204 } 4205 } 4206 4207 /** 4208 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources 4209 * @beiscsi_conn: ptr to the conn to be cleaned up 4210 * @task: ptr to iscsi_task resource to be freed. 4211 * 4212 * Free driver mgmt resources bound to CXN. 
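* Returns the WRB handle and the mgmt SGL handle to their pools and, if a mgmt buffer was DMA-mapped, unmaps it.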
4213 **/ 4214 void 4215 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4216 struct iscsi_task *task) 4217 { 4218 struct beiscsi_io_task *io_task; 4219 struct beiscsi_hba *phba = beiscsi_conn->phba; 4220 struct hwi_wrb_context *pwrb_context; 4221 struct hwi_controller *phwi_ctrlr; 4222 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4223 beiscsi_conn->beiscsi_conn_cid); 4224 4225 phwi_ctrlr = phba->phwi_ctrlr; 4226 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4227 4228 io_task = task->dd_data; 4229 4230 if (io_task->pwrb_handle) { 4231 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4232 io_task->pwrb_handle = NULL; 4233 } 4234 4235 if (io_task->psgl_handle) { 4236 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4237 io_task->psgl_handle = NULL; 4238 } 4239 4240 if (io_task->mtask_addr) { 4241 pci_unmap_single(phba->pcidev, 4242 io_task->mtask_addr, 4243 io_task->mtask_data_count, 4244 PCI_DMA_TODEVICE); 4245 io_task->mtask_addr = 0; 4246 } 4247 } 4248 4249 /** 4250 * beiscsi_cleanup_task()- Free driver resources of the task 4251 * @task: ptr to the iscsi task 4252 * 4253 **/ 4254 static void beiscsi_cleanup_task(struct iscsi_task *task) 4255 { 4256 struct beiscsi_io_task *io_task = task->dd_data; 4257 struct iscsi_conn *conn = task->conn; 4258 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4259 struct beiscsi_hba *phba = beiscsi_conn->phba; 4260 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4261 struct hwi_wrb_context *pwrb_context; 4262 struct hwi_controller *phwi_ctrlr; 4263 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4264 beiscsi_conn->beiscsi_conn_cid); 4265 4266 phwi_ctrlr = phba->phwi_ctrlr; 4267 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4268 4269 if (io_task->cmd_bhs) { 4270 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4271 io_task->bhs_pa.u.a64.address); 4272 io_task->cmd_bhs = NULL; 4273 task->hdr = NULL; 4274 } 4275 4276 if (task->sc) { 4277 if (io_task->pwrb_handle) { 4278 free_wrb_handle(phba, pwrb_context, 4279 io_task->pwrb_handle); 4280 io_task->pwrb_handle = NULL; 4281 } 4282 4283 if (io_task->psgl_handle) { 4284 free_io_sgl_handle(phba, io_task->psgl_handle); 4285 io_task->psgl_handle = NULL; 4286 } 4287 4288 if (io_task->scsi_cmnd) { 4289 if (io_task->num_sg) 4290 scsi_dma_unmap(io_task->scsi_cmnd); 4291 io_task->scsi_cmnd = NULL; 4292 } 4293 } else { 4294 if (!beiscsi_conn->login_in_progress) 4295 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4296 } 4297 } 4298 4299 void 4300 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4301 struct beiscsi_offload_params *params) 4302 { 4303 struct wrb_handle *pwrb_handle; 4304 struct hwi_wrb_context *pwrb_context = NULL; 4305 struct beiscsi_hba *phba = beiscsi_conn->phba; 4306 struct iscsi_task *task = beiscsi_conn->task; 4307 struct iscsi_session *session = task->conn->session; 4308 u32 doorbell = 0; 4309 4310 /* 4311 * We can always use 0 here because it is reserved by libiscsi for 4312 * login/startup related tasks. 
4313 */ 4314 beiscsi_conn->login_in_progress = 0; 4315 spin_lock_bh(&session->back_lock); 4316 beiscsi_cleanup_task(task); 4317 spin_unlock_bh(&session->back_lock); 4318 4319 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 4320 &pwrb_context); 4321 4322 /* Check for the adapter family */ 4323 if (is_chip_be2_be3r(phba)) 4324 beiscsi_offload_cxn_v0(params, pwrb_handle, 4325 phba->init_mem, 4326 pwrb_context); 4327 else 4328 beiscsi_offload_cxn_v2(params, pwrb_handle, 4329 pwrb_context); 4330 4331 be_dws_le_to_cpu(pwrb_handle->pwrb, 4332 sizeof(struct iscsi_target_context_update_wrb)); 4333 4334 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4335 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4336 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4337 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4338 iowrite32(doorbell, phba->db_va + 4339 beiscsi_conn->doorbell_offset); 4340 4341 /* 4342 * There is no completion for CONTEXT_UPDATE. The completion of next 4343 * WRB posted guarantees FW's processing and DMA'ing of it. 4344 * Use beiscsi_put_wrb_handle to put it back in the pool which makes 4345 * sure zero'ing or reuse of the WRB only after wrbs_per_cxn. 4346 */ 4347 beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, 4348 phba->params.wrbs_per_cxn); 4349 beiscsi_log(phba, KERN_INFO, 4350 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4351 "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n", 4352 pwrb_handle, pwrb_context->free_index, 4353 pwrb_context->wrb_handles_available); 4354 } 4355 4356 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4357 int *index, int *age) 4358 { 4359 *index = (int)itt; 4360 if (age) 4361 *age = conn->session->age; 4362 } 4363 4364 /** 4365 * beiscsi_alloc_pdu - allocates pdu and related resources 4366 * @task: libiscsi task 4367 * @opcode: opcode of pdu for task 4368 * 4369 * This is called with the session lock held. It will allocate 4370 * the wrb and sgl if needed for the command. And it will prep 4371 * the pdu's itt. beiscsi_parse_pdu will later translate 4372 * the pdu itt to the libiscsi task itt. 
*/ 4374 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4375 { 4376 struct beiscsi_io_task *io_task = task->dd_data; 4377 struct iscsi_conn *conn = task->conn; 4378 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4379 struct beiscsi_hba *phba = beiscsi_conn->phba; 4380 struct hwi_wrb_context *pwrb_context; 4381 struct hwi_controller *phwi_ctrlr; 4382 itt_t itt; 4383 uint16_t cri_index = 0; 4384 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4385 dma_addr_t paddr; 4386 4387 io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool, 4388 GFP_ATOMIC, &paddr); 4389 if (!io_task->cmd_bhs) 4390 return -ENOMEM; 4391 io_task->bhs_pa.u.a64.address = paddr; 4392 io_task->libiscsi_itt = (itt_t)task->itt; 4393 io_task->conn = beiscsi_conn; 4394 4395 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4396 task->hdr_max = sizeof(struct be_cmd_bhs); 4397 io_task->psgl_handle = NULL; 4398 io_task->pwrb_handle = NULL; 4399 4400 if (task->sc) { 4401 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4402 if (!io_task->psgl_handle) { 4403 beiscsi_log(phba, KERN_ERR, 4404 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4405 "BM_%d : Alloc of IO_SGL_ICD Failed " 4406 "for the CID : %d\n", 4407 beiscsi_conn->beiscsi_conn_cid); 4408 goto free_hndls; 4409 } 4410 io_task->pwrb_handle = alloc_wrb_handle(phba, 4411 beiscsi_conn->beiscsi_conn_cid, 4412 &io_task->pwrb_context); 4413 if (!io_task->pwrb_handle) { 4414 beiscsi_log(phba, KERN_ERR, 4415 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4416 "BM_%d : Alloc of WRB_HANDLE Failed " 4417 "for the CID : %d\n", 4418 beiscsi_conn->beiscsi_conn_cid); 4419 goto free_io_hndls; 4420 } 4421 } else { 4422 io_task->scsi_cmnd = NULL; 4423 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4424 beiscsi_conn->task = task; 4425 if (!beiscsi_conn->login_in_progress) { 4426 io_task->psgl_handle = (struct sgl_handle *) 4427 alloc_mgmt_sgl_handle(phba); 4428 if (!io_task->psgl_handle) { 4429 beiscsi_log(phba, KERN_ERR, 4430 BEISCSI_LOG_IO | 4431 BEISCSI_LOG_CONFIG, 4432 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4433 "for the CID : %d\n", 4434 beiscsi_conn-> 4435 beiscsi_conn_cid); 4436 goto free_hndls; 4437 } 4438 4439 beiscsi_conn->login_in_progress = 1; 4440 beiscsi_conn->plogin_sgl_handle = 4441 io_task->psgl_handle; 4442 io_task->pwrb_handle = 4443 alloc_wrb_handle(phba, 4444 beiscsi_conn->beiscsi_conn_cid, 4445 &io_task->pwrb_context); 4446 if (!io_task->pwrb_handle) { 4447 beiscsi_log(phba, KERN_ERR, 4448 BEISCSI_LOG_IO | 4449 BEISCSI_LOG_CONFIG, 4450 "BM_%d : Alloc of WRB_HANDLE Failed " 4451 "for the CID : %d\n", 4452 beiscsi_conn-> 4453 beiscsi_conn_cid); 4454 goto free_mgmt_hndls; 4455 } 4456 beiscsi_conn->plogin_wrb_handle = 4457 io_task->pwrb_handle; 4458 4459 } else { 4460 io_task->psgl_handle = 4461 beiscsi_conn->plogin_sgl_handle; 4462 io_task->pwrb_handle = 4463 beiscsi_conn->plogin_wrb_handle; 4464 } 4465 } else { 4466 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4467 if (!io_task->psgl_handle) { 4468 beiscsi_log(phba, KERN_ERR, 4469 BEISCSI_LOG_IO | 4470 BEISCSI_LOG_CONFIG, 4471 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4472 "for the CID : %d\n", 4473 beiscsi_conn-> 4474 beiscsi_conn_cid); 4475 goto free_hndls; 4476 } 4477 io_task->pwrb_handle = 4478 alloc_wrb_handle(phba, 4479 beiscsi_conn->beiscsi_conn_cid, 4480 &io_task->pwrb_context); 4481 if (!io_task->pwrb_handle) { 4482 beiscsi_log(phba, KERN_ERR, 4483 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4484 "BM_%d : Alloc of WRB_HANDLE Failed " 4485 "for the CID : %d\n", 4486
beiscsi_conn->beiscsi_conn_cid); 4487 goto free_mgmt_hndls; 4488 } 4489 4490 } 4491 } 4492 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4493 wrb_index << 16) | (unsigned int) 4494 (io_task->psgl_handle->sgl_index)); 4495 io_task->pwrb_handle->pio_handle = task; 4496 4497 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4498 return 0; 4499 4500 free_io_hndls: 4501 free_io_sgl_handle(phba, io_task->psgl_handle); 4502 goto free_hndls; 4503 free_mgmt_hndls: 4504 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4505 io_task->psgl_handle = NULL; 4506 free_hndls: 4507 phwi_ctrlr = phba->phwi_ctrlr; 4508 cri_index = BE_GET_CRI_FROM_CID( 4509 beiscsi_conn->beiscsi_conn_cid); 4510 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4511 if (io_task->pwrb_handle) 4512 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4513 io_task->pwrb_handle = NULL; 4514 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4515 io_task->bhs_pa.u.a64.address); 4516 io_task->cmd_bhs = NULL; 4517 return -ENOMEM; 4518 } 4519 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4520 unsigned int num_sg, unsigned int xferlen, 4521 unsigned int writedir) 4522 { 4523 4524 struct beiscsi_io_task *io_task = task->dd_data; 4525 struct iscsi_conn *conn = task->conn; 4526 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4527 struct beiscsi_hba *phba = beiscsi_conn->phba; 4528 struct iscsi_wrb *pwrb = NULL; 4529 unsigned int doorbell = 0; 4530 4531 pwrb = io_task->pwrb_handle->pwrb; 4532 4533 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4534 4535 if (writedir) { 4536 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4537 INI_WR_CMD); 4538 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4539 } else { 4540 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4541 INI_RD_CMD); 4542 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4543 } 4544 4545 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4546 type, pwrb); 4547 4548 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4549 cpu_to_be16(*(unsigned short *) 4550 &io_task->cmd_bhs->iscsi_hdr.lun)); 4551 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4552 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4553 io_task->pwrb_handle->wrb_index); 4554 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4555 be32_to_cpu(task->cmdsn)); 4556 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4557 io_task->psgl_handle->sgl_index); 4558 4559 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4560 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4561 io_task->pwrb_handle->wrb_index); 4562 if (io_task->pwrb_context->plast_wrb) 4563 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4564 io_task->pwrb_context->plast_wrb, 4565 io_task->pwrb_handle->wrb_index); 4566 io_task->pwrb_context->plast_wrb = pwrb; 4567 4568 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4569 4570 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4571 doorbell |= (io_task->pwrb_handle->wrb_index & 4572 DB_DEF_PDU_WRB_INDEX_MASK) << 4573 DB_DEF_PDU_WRB_INDEX_SHIFT; 4574 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4575 iowrite32(doorbell, phba->db_va + 4576 beiscsi_conn->doorbell_offset); 4577 return 0; 4578 } 4579 4580 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4581 unsigned int num_sg, unsigned int xferlen, 4582 unsigned int writedir) 4583 { 4584 4585 struct beiscsi_io_task *io_task = task->dd_data; 4586 struct iscsi_conn *conn = task->conn; 4587 struct 
beiscsi_conn *beiscsi_conn = conn->dd_data; 4588 struct beiscsi_hba *phba = beiscsi_conn->phba; 4589 struct iscsi_wrb *pwrb = NULL; 4590 unsigned int doorbell = 0; 4591 4592 pwrb = io_task->pwrb_handle->pwrb; 4593 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4594 4595 if (writedir) { 4596 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4597 INI_WR_CMD); 4598 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4599 } else { 4600 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4601 INI_RD_CMD); 4602 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4603 } 4604 4605 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4606 type, pwrb); 4607 4608 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4609 cpu_to_be16(*(unsigned short *) 4610 &io_task->cmd_bhs->iscsi_hdr.lun)); 4611 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4612 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4613 io_task->pwrb_handle->wrb_index); 4614 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4615 be32_to_cpu(task->cmdsn)); 4616 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4617 io_task->psgl_handle->sgl_index); 4618 4619 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4620 4621 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4622 io_task->pwrb_handle->wrb_index); 4623 if (io_task->pwrb_context->plast_wrb) 4624 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4625 io_task->pwrb_context->plast_wrb, 4626 io_task->pwrb_handle->wrb_index); 4627 io_task->pwrb_context->plast_wrb = pwrb; 4628 4629 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4630 4631 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4632 doorbell |= (io_task->pwrb_handle->wrb_index & 4633 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4634 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4635 4636 iowrite32(doorbell, phba->db_va + 4637 beiscsi_conn->doorbell_offset); 4638 return 0; 4639 } 4640 4641 static int beiscsi_mtask(struct iscsi_task *task) 4642 { 4643 struct beiscsi_io_task *io_task = task->dd_data; 4644 struct iscsi_conn *conn = task->conn; 4645 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4646 struct beiscsi_hba *phba = beiscsi_conn->phba; 4647 struct iscsi_wrb *pwrb = NULL; 4648 unsigned int doorbell = 0; 4649 unsigned int cid; 4650 unsigned int pwrb_typeoffset = 0; 4651 int ret = 0; 4652 4653 cid = beiscsi_conn->beiscsi_conn_cid; 4654 pwrb = io_task->pwrb_handle->pwrb; 4655 4656 if (is_chip_be2_be3r(phba)) { 4657 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4658 be32_to_cpu(task->cmdsn)); 4659 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4660 io_task->pwrb_handle->wrb_index); 4661 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4662 io_task->psgl_handle->sgl_index); 4663 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4664 task->data_count); 4665 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4666 io_task->pwrb_handle->wrb_index); 4667 if (io_task->pwrb_context->plast_wrb) 4668 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4669 io_task->pwrb_context->plast_wrb, 4670 io_task->pwrb_handle->wrb_index); 4671 io_task->pwrb_context->plast_wrb = pwrb; 4672 4673 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4674 } else { 4675 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4676 be32_to_cpu(task->cmdsn)); 4677 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4678 io_task->pwrb_handle->wrb_index); 4679 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4680 io_task->psgl_handle->sgl_index); 4681 AMAP_SET_BITS(struct 
amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 4682 task->data_count); 4683 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4684 io_task->pwrb_handle->wrb_index); 4685 if (io_task->pwrb_context->plast_wrb) 4686 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4687 io_task->pwrb_context->plast_wrb, 4688 io_task->pwrb_handle->wrb_index); 4689 io_task->pwrb_context->plast_wrb = pwrb; 4690 4691 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 4692 } 4693 4694 4695 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 4696 case ISCSI_OP_LOGIN: 4697 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 4698 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4699 ret = hwi_write_buffer(pwrb, task); 4700 break; 4701 case ISCSI_OP_NOOP_OUT: 4702 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4703 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4704 if (is_chip_be2_be3r(phba)) 4705 AMAP_SET_BITS(struct amap_iscsi_wrb, 4706 dmsg, pwrb, 1); 4707 else 4708 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4709 dmsg, pwrb, 1); 4710 } else { 4711 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4712 if (is_chip_be2_be3r(phba)) 4713 AMAP_SET_BITS(struct amap_iscsi_wrb, 4714 dmsg, pwrb, 0); 4715 else 4716 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4717 dmsg, pwrb, 0); 4718 } 4719 ret = hwi_write_buffer(pwrb, task); 4720 break; 4721 case ISCSI_OP_TEXT: 4722 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4723 ret = hwi_write_buffer(pwrb, task); 4724 break; 4725 case ISCSI_OP_SCSI_TMFUNC: 4726 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 4727 ret = hwi_write_buffer(pwrb, task); 4728 break; 4729 case ISCSI_OP_LOGOUT: 4730 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 4731 ret = hwi_write_buffer(pwrb, task); 4732 break; 4733 4734 default: 4735 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4736 "BM_%d : opcode =%d Not supported\n", 4737 task->hdr->opcode & ISCSI_OPCODE_MASK); 4738 4739 return -EINVAL; 4740 } 4741 4742 if (ret) 4743 return ret; 4744 4745 /* Set the task type */ 4746 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 4747 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 4748 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 4749 4750 doorbell |= cid & DB_WRB_POST_CID_MASK; 4751 doorbell |= (io_task->pwrb_handle->wrb_index & 4752 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4753 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4754 iowrite32(doorbell, phba->db_va + 4755 beiscsi_conn->doorbell_offset); 4756 return 0; 4757 } 4758 4759 static int beiscsi_task_xmit(struct iscsi_task *task) 4760 { 4761 struct beiscsi_io_task *io_task = task->dd_data; 4762 struct scsi_cmnd *sc = task->sc; 4763 struct beiscsi_hba *phba; 4764 struct scatterlist *sg; 4765 int num_sg; 4766 unsigned int writedir = 0, xferlen = 0; 4767 4768 phba = io_task->conn->phba; 4769 /** 4770 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be 4771 * operational if FW still gets heartbeat from EP FW. Is management 4772 * path really needed to continue further? 
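* As implemented below, both IO and mgmt tasks are simply failed with -EIO once the port stops reporting online.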
4773 */ 4774 if (!beiscsi_hba_is_online(phba)) 4775 return -EIO; 4776 4777 if (!io_task->conn->login_in_progress) 4778 task->hdr->exp_statsn = 0; 4779 4780 if (!sc) 4781 return beiscsi_mtask(task); 4782 4783 io_task->scsi_cmnd = sc; 4784 io_task->num_sg = 0; 4785 num_sg = scsi_dma_map(sc); 4786 if (num_sg < 0) { 4787 beiscsi_log(phba, KERN_ERR, 4788 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 4789 "BM_%d : scsi_dma_map Failed " 4790 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 4791 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 4792 io_task->libiscsi_itt, scsi_bufflen(sc)); 4793 4794 return num_sg; 4795 } 4796 /** 4797 * For scsi cmd task, check num_sg before unmapping in cleanup_task. 4798 * For management task, cleanup_task checks mtask_addr before unmapping. 4799 */ 4800 io_task->num_sg = num_sg; 4801 xferlen = scsi_bufflen(sc); 4802 sg = scsi_sglist(sc); 4803 if (sc->sc_data_direction == DMA_TO_DEVICE) 4804 writedir = 1; 4805 else 4806 writedir = 0; 4807 4808 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 4809 } 4810 4811 /** 4812 * beiscsi_bsg_request - handle bsg request from ISCSI transport 4813 * @job: job to handle 4814 */ 4815 static int beiscsi_bsg_request(struct bsg_job *job) 4816 { 4817 struct Scsi_Host *shost; 4818 struct beiscsi_hba *phba; 4819 struct iscsi_bsg_request *bsg_req = job->request; 4820 int rc = -EINVAL; 4821 unsigned int tag; 4822 struct be_dma_mem nonemb_cmd; 4823 struct be_cmd_resp_hdr *resp; 4824 struct iscsi_bsg_reply *bsg_reply = job->reply; 4825 unsigned short status, extd_status; 4826 4827 shost = iscsi_job_to_shost(job); 4828 phba = iscsi_host_priv(shost); 4829 4830 if (!beiscsi_hba_is_online(phba)) { 4831 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 4832 "BM_%d : HBA in error 0x%lx\n", phba->state); 4833 return -ENXIO; 4834 } 4835 4836 switch (bsg_req->msgcode) { 4837 case ISCSI_BSG_HST_VENDOR: 4838 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 4839 job->request_payload.payload_len, 4840 &nonemb_cmd.dma); 4841 if (nonemb_cmd.va == NULL) { 4842 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4843 "BM_%d : Failed to allocate memory for " 4844 "beiscsi_bsg_request\n"); 4845 return -ENOMEM; 4846 } 4847 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4848 &nonemb_cmd); 4849 if (!tag) { 4850 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4851 "BM_%d : MBX Tag Allocation Failed\n"); 4852 4853 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4854 nonemb_cmd.va, nonemb_cmd.dma); 4855 return -EAGAIN; 4856 } 4857 4858 rc = wait_event_interruptible_timeout( 4859 phba->ctrl.mcc_wait[tag], 4860 phba->ctrl.mcc_tag_status[tag], 4861 msecs_to_jiffies( 4862 BEISCSI_HOST_MBX_TIMEOUT)); 4863 4864 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 4865 clear_bit(MCC_TAG_STATE_RUNNING, 4866 &phba->ctrl.ptag_state[tag].tag_state); 4867 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4868 nonemb_cmd.va, nonemb_cmd.dma); 4869 return -EIO; 4870 } 4871 extd_status = (phba->ctrl.mcc_tag_status[tag] & 4872 CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; 4873 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; 4874 free_mcc_wrb(&phba->ctrl, tag); 4875 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 4876 sg_copy_from_buffer(job->reply_payload.sg_list, 4877 job->reply_payload.sg_cnt, 4878 nonemb_cmd.va, (resp->response_length 4879 + sizeof(*resp))); 4880 bsg_reply->reply_payload_rcv_len = resp->response_length; 4881 bsg_reply->result = status; 4882 bsg_job_done(job, bsg_reply->result, 4883 bsg_reply->reply_payload_rcv_len); 4884 
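/* The response has already been copied into the bsg reply, so the DMA buffer can be freed before the completion status is checked. */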
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4885 nonemb_cmd.va, nonemb_cmd.dma); 4886 if (status || extd_status) { 4887 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4888 "BM_%d : MBX Cmd Failed" 4889 " status = %d extd_status = %d\n", 4890 status, extd_status); 4891 4892 return -EIO; 4893 } else { 4894 rc = 0; 4895 } 4896 break; 4897 4898 default: 4899 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4900 "BM_%d : Unsupported bsg command: 0x%x\n", 4901 bsg_req->msgcode); 4902 break; 4903 } 4904 4905 return rc; 4906 } 4907 4908 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 4909 { 4910 /* Set the logging parameter */ 4911 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4912 } 4913 4914 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle) 4915 { 4916 if (phba->boot_struct.boot_kset) 4917 return; 4918 4919 /* skip if boot work is already in progress */ 4920 if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) 4921 return; 4922 4923 phba->boot_struct.retry = 3; 4924 phba->boot_struct.tag = 0; 4925 phba->boot_struct.s_handle = s_handle; 4926 phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE; 4927 schedule_work(&phba->boot_work); 4928 } 4929 4930 /** 4931 * Boot flag info for iscsi-utilities 4932 * Bit 0 Block valid flag 4933 * Bit 1 Firmware booting selected 4934 */ 4935 #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3 4936 4937 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 4938 { 4939 struct beiscsi_hba *phba = data; 4940 struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess; 4941 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 4942 char *str = buf; 4943 int rc = -EPERM; 4944 4945 switch (type) { 4946 case ISCSI_BOOT_TGT_NAME: 4947 rc = sprintf(buf, "%.*s\n", 4948 (int)strlen(boot_sess->target_name), 4949 (char *)&boot_sess->target_name); 4950 break; 4951 case ISCSI_BOOT_TGT_IP_ADDR: 4952 if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4) 4953 rc = sprintf(buf, "%pI4\n", 4954 (char *)&boot_conn->dest_ipaddr.addr); 4955 else 4956 rc = sprintf(str, "%pI6\n", 4957 (char *)&boot_conn->dest_ipaddr.addr); 4958 break; 4959 case ISCSI_BOOT_TGT_PORT: 4960 rc = sprintf(str, "%d\n", boot_conn->dest_port); 4961 break; 4962 4963 case ISCSI_BOOT_TGT_CHAP_NAME: 4964 rc = sprintf(str, "%.*s\n", 4965 boot_conn->negotiated_login_options.auth_data.chap. 4966 target_chap_name_length, 4967 (char *)&boot_conn->negotiated_login_options. 4968 auth_data.chap.target_chap_name); 4969 break; 4970 case ISCSI_BOOT_TGT_CHAP_SECRET: 4971 rc = sprintf(str, "%.*s\n", 4972 boot_conn->negotiated_login_options.auth_data.chap. 4973 target_secret_length, 4974 (char *)&boot_conn->negotiated_login_options. 4975 auth_data.chap.target_secret); 4976 break; 4977 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 4978 rc = sprintf(str, "%.*s\n", 4979 boot_conn->negotiated_login_options.auth_data.chap. 4980 intr_chap_name_length, 4981 (char *)&boot_conn->negotiated_login_options. 4982 auth_data.chap.intr_chap_name); 4983 break; 4984 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 4985 rc = sprintf(str, "%.*s\n", 4986 boot_conn->negotiated_login_options.auth_data.chap. 4987 intr_secret_length, 4988 (char *)&boot_conn->negotiated_login_options. 
4989 auth_data.chap.intr_secret); 4990 break; 4991 case ISCSI_BOOT_TGT_FLAGS: 4992 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 4993 break; 4994 case ISCSI_BOOT_TGT_NIC_ASSOC: 4995 rc = sprintf(str, "0\n"); 4996 break; 4997 } 4998 return rc; 4999 } 5000 5001 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 5002 { 5003 struct beiscsi_hba *phba = data; 5004 char *str = buf; 5005 int rc = -EPERM; 5006 5007 switch (type) { 5008 case ISCSI_BOOT_INI_INITIATOR_NAME: 5009 rc = sprintf(str, "%s\n", 5010 phba->boot_struct.boot_sess.initiator_iscsiname); 5011 break; 5012 } 5013 return rc; 5014 } 5015 5016 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 5017 { 5018 struct beiscsi_hba *phba = data; 5019 char *str = buf; 5020 int rc = -EPERM; 5021 5022 switch (type) { 5023 case ISCSI_BOOT_ETH_FLAGS: 5024 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 5025 break; 5026 case ISCSI_BOOT_ETH_INDEX: 5027 rc = sprintf(str, "0\n"); 5028 break; 5029 case ISCSI_BOOT_ETH_MAC: 5030 rc = beiscsi_get_macaddr(str, phba); 5031 break; 5032 } 5033 return rc; 5034 } 5035 5036 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 5037 { 5038 umode_t rc = 0; 5039 5040 switch (type) { 5041 case ISCSI_BOOT_TGT_NAME: 5042 case ISCSI_BOOT_TGT_IP_ADDR: 5043 case ISCSI_BOOT_TGT_PORT: 5044 case ISCSI_BOOT_TGT_CHAP_NAME: 5045 case ISCSI_BOOT_TGT_CHAP_SECRET: 5046 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5047 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5048 case ISCSI_BOOT_TGT_NIC_ASSOC: 5049 case ISCSI_BOOT_TGT_FLAGS: 5050 rc = S_IRUGO; 5051 break; 5052 } 5053 return rc; 5054 } 5055 5056 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 5057 { 5058 umode_t rc = 0; 5059 5060 switch (type) { 5061 case ISCSI_BOOT_INI_INITIATOR_NAME: 5062 rc = S_IRUGO; 5063 break; 5064 } 5065 return rc; 5066 } 5067 5068 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 5069 { 5070 umode_t rc = 0; 5071 5072 switch (type) { 5073 case ISCSI_BOOT_ETH_FLAGS: 5074 case ISCSI_BOOT_ETH_MAC: 5075 case ISCSI_BOOT_ETH_INDEX: 5076 rc = S_IRUGO; 5077 break; 5078 } 5079 return rc; 5080 } 5081 5082 static void beiscsi_boot_kobj_release(void *data) 5083 { 5084 struct beiscsi_hba *phba = data; 5085 5086 scsi_host_put(phba->shost); 5087 } 5088 5089 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba) 5090 { 5091 struct boot_struct *bs = &phba->boot_struct; 5092 struct iscsi_boot_kobj *boot_kobj; 5093 5094 if (bs->boot_kset) { 5095 __beiscsi_log(phba, KERN_ERR, 5096 "BM_%d: boot_kset already created\n"); 5097 return 0; 5098 } 5099 5100 bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 5101 if (!bs->boot_kset) { 5102 __beiscsi_log(phba, KERN_ERR, 5103 "BM_%d: boot_kset alloc failed\n"); 5104 return -ENOMEM; 5105 } 5106 5107 /* get shost ref because the show function will refer phba */ 5108 if (!scsi_host_get(phba->shost)) 5109 goto free_kset; 5110 5111 boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba, 5112 beiscsi_show_boot_tgt_info, 5113 beiscsi_tgt_get_attr_visibility, 5114 beiscsi_boot_kobj_release); 5115 if (!boot_kobj) 5116 goto put_shost; 5117 5118 if (!scsi_host_get(phba->shost)) 5119 goto free_kset; 5120 5121 boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba, 5122 beiscsi_show_boot_ini_info, 5123 beiscsi_ini_get_attr_visibility, 5124 beiscsi_boot_kobj_release); 5125 if (!boot_kobj) 5126 goto put_shost; 5127 5128 if (!scsi_host_get(phba->shost)) 5129 goto free_kset; 5130 5131 boot_kobj = 
iscsi_boot_create_ethernet(bs->boot_kset, 0, phba, 5132 beiscsi_show_boot_eth_info, 5133 beiscsi_eth_get_attr_visibility, 5134 beiscsi_boot_kobj_release); 5135 if (!boot_kobj) 5136 goto put_shost; 5137 5138 return 0; 5139 5140 put_shost: 5141 scsi_host_put(phba->shost); 5142 free_kset: 5143 iscsi_boot_destroy_kset(bs->boot_kset); 5144 bs->boot_kset = NULL; 5145 return -ENOMEM; 5146 } 5147 5148 static void beiscsi_boot_work(struct work_struct *work) 5149 { 5150 struct beiscsi_hba *phba = 5151 container_of(work, struct beiscsi_hba, boot_work); 5152 struct boot_struct *bs = &phba->boot_struct; 5153 unsigned int tag = 0; 5154 5155 if (!beiscsi_hba_is_online(phba)) 5156 return; 5157 5158 beiscsi_log(phba, KERN_INFO, 5159 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 5160 "BM_%d : %s action %d\n", 5161 __func__, phba->boot_struct.action); 5162 5163 switch (phba->boot_struct.action) { 5164 case BEISCSI_BOOT_REOPEN_SESS: 5165 tag = beiscsi_boot_reopen_sess(phba); 5166 break; 5167 case BEISCSI_BOOT_GET_SHANDLE: 5168 tag = __beiscsi_boot_get_shandle(phba, 1); 5169 break; 5170 case BEISCSI_BOOT_GET_SINFO: 5171 tag = beiscsi_boot_get_sinfo(phba); 5172 break; 5173 case BEISCSI_BOOT_LOGOUT_SESS: 5174 tag = beiscsi_boot_logout_sess(phba); 5175 break; 5176 case BEISCSI_BOOT_CREATE_KSET: 5177 beiscsi_boot_create_kset(phba); 5178 /** 5179 * updated boot_kset is made visible to all before 5180 * ending the boot work. 5181 */ 5182 mb(); 5183 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5184 return; 5185 } 5186 if (!tag) { 5187 if (bs->retry--) 5188 schedule_work(&phba->boot_work); 5189 else 5190 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5191 } 5192 } 5193 5194 static void beiscsi_eqd_update_work(struct work_struct *work) 5195 { 5196 struct hwi_context_memory *phwi_context; 5197 struct be_set_eqd set_eqd[MAX_CPUS]; 5198 struct hwi_controller *phwi_ctrlr; 5199 struct be_eq_obj *pbe_eq; 5200 struct beiscsi_hba *phba; 5201 unsigned int pps, delta; 5202 struct be_aic_obj *aic; 5203 int eqd, i, num = 0; 5204 unsigned long now; 5205 5206 phba = container_of(work, struct beiscsi_hba, eqd_update.work); 5207 if (!beiscsi_hba_is_online(phba)) 5208 return; 5209 5210 phwi_ctrlr = phba->phwi_ctrlr; 5211 phwi_context = phwi_ctrlr->phwi_ctxt; 5212 5213 for (i = 0; i <= phba->num_cpus; i++) { 5214 aic = &phba->aic_obj[i]; 5215 pbe_eq = &phwi_context->be_eq[i]; 5216 now = jiffies; 5217 if (!aic->jiffies || time_before(now, aic->jiffies) || 5218 pbe_eq->cq_count < aic->eq_prev) { 5219 aic->jiffies = now; 5220 aic->eq_prev = pbe_eq->cq_count; 5221 continue; 5222 } 5223 delta = jiffies_to_msecs(now - aic->jiffies); 5224 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5225 eqd = (pps / 1500) << 2; 5226 5227 if (eqd < 8) 5228 eqd = 0; 5229 eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX); 5230 eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN); 5231 5232 aic->jiffies = now; 5233 aic->eq_prev = pbe_eq->cq_count; 5234 5235 if (eqd != aic->prev_eqd) { 5236 set_eqd[num].delay_multiplier = (eqd * 65)/100; 5237 set_eqd[num].eq_id = pbe_eq->q.id; 5238 aic->prev_eqd = eqd; 5239 num++; 5240 } 5241 } 5242 if (num) 5243 /* completion of this is ignored */ 5244 beiscsi_modify_eq_delay(phba, set_eqd, num); 5245 5246 schedule_delayed_work(&phba->eqd_update, 5247 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5248 } 5249 5250 static void beiscsi_hw_tpe_check(struct timer_list *t) 5251 { 5252 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5253 u32 wait; 5254 5255 /* if not TPE, do nothing */ 5256 if (!beiscsi_detect_tpe(phba)) 
5257 return; 5258 5259 /* wait default 4000ms before recovering */ 5260 wait = 4000; 5261 if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL) 5262 wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL; 5263 queue_delayed_work(phba->wq, &phba->recover_port, 5264 msecs_to_jiffies(wait)); 5265 } 5266 5267 static void beiscsi_hw_health_check(struct timer_list *t) 5268 { 5269 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5270 5271 5272 if (beiscsi_detect_ue(phba)) { 5273 __beiscsi_log(phba, KERN_ERR, 5274 "BM_%d : port in error: %lx\n", phba->state); 5275 /* sessions are no longer valid, so first fail the sessions */ 5276 queue_work(phba->wq, &phba->sess_work); 5277 5278 /* detect UER supported */ 5279 if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state)) 5280 return; 5281 /* modify this timer to check TPE */ 5282 phba->hw_check.function = beiscsi_hw_tpe_check; 5283 } 5284 5285 mod_timer(&phba->hw_check, 5286 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5287 } 5288 5289 /* 5290 * beiscsi_enable_port()- Enables the disabled port. 5291 * Only port resources freed in disable function are reallocated. 5292 * This is called in HBA error handling path. 5293 * 5294 * @phba: Instance of driver private structure 5295 * 5296 **/ 5297 static int beiscsi_enable_port(struct beiscsi_hba *phba) 5298 { 5299 struct hwi_context_memory *phwi_context; 5300 struct hwi_controller *phwi_ctrlr; 5301 struct be_eq_obj *pbe_eq; 5302 int ret, i; 5303 5304 if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 5305 __beiscsi_log(phba, KERN_ERR, 5306 "BM_%d : %s : port is online %lx\n", 5307 __func__, phba->state); 5308 return 0; 5309 } 5310 5311 ret = beiscsi_init_sliport(phba); 5312 if (ret) 5313 return ret; 5314 5315 be2iscsi_enable_msix(phba); 5316 5317 beiscsi_get_params(phba); 5318 beiscsi_set_host_data(phba); 5319 /* Re-enable UER. If different TPE occurs then it is recoverable. */ 5320 beiscsi_set_uer_feature(phba); 5321 5322 phba->shost->max_id = phba->params.cxns_per_ctrl; 5323 phba->shost->can_queue = phba->params.ios_per_ctrl; 5324 ret = beiscsi_init_port(phba); 5325 if (ret < 0) { 5326 __beiscsi_log(phba, KERN_ERR, 5327 "BM_%d : init port failed\n"); 5328 goto disable_msix; 5329 } 5330 5331 for (i = 0; i < MAX_MCC_CMD; i++) { 5332 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5333 phba->ctrl.mcc_tag[i] = i + 1; 5334 phba->ctrl.mcc_tag_status[i + 1] = 0; 5335 phba->ctrl.mcc_tag_available++; 5336 } 5337 5338 phwi_ctrlr = phba->phwi_ctrlr; 5339 phwi_context = phwi_ctrlr->phwi_ctxt; 5340 for (i = 0; i < phba->num_cpus; i++) { 5341 pbe_eq = &phwi_context->be_eq[i]; 5342 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); 5343 } 5344 5345 i = (phba->pcidev->msix_enabled) ? i : 0; 5346 /* Work item for MCC handling */ 5347 pbe_eq = &phwi_context->be_eq[i]; 5348 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); 5349 5350 ret = beiscsi_init_irqs(phba); 5351 if (ret < 0) { 5352 __beiscsi_log(phba, KERN_ERR, 5353 "BM_%d : setup IRQs failed %d\n", ret); 5354 goto cleanup_port; 5355 } 5356 hwi_enable_intr(phba); 5357 /* port operational: clear all error bits */ 5358 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5359 __beiscsi_log(phba, KERN_INFO, 5360 "BM_%d : port online: 0x%lx\n", phba->state); 5361 5362 /* start hw_check timer and eqd_update work */ 5363 schedule_delayed_work(&phba->eqd_update, 5364 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5365 5366 /** 5367 * Timer function gets modified for TPE detection. 5368 * Always reinit to do health check first. 
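* Once TPE is detected, beiscsi_hw_tpe_check queues the delayed recover_port work instead of re-arming this timer.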
5369 */ 5370 phba->hw_check.function = beiscsi_hw_health_check; 5371 mod_timer(&phba->hw_check, 5372 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5373 return 0; 5374 5375 cleanup_port: 5376 for (i = 0; i < phba->num_cpus; i++) { 5377 pbe_eq = &phwi_context->be_eq[i]; 5378 irq_poll_disable(&pbe_eq->iopoll); 5379 } 5380 hwi_cleanup_port(phba); 5381 5382 disable_msix: 5383 pci_free_irq_vectors(phba->pcidev); 5384 return ret; 5385 } 5386 5387 /* 5388 * beiscsi_disable_port()- Disable port and cleanup driver resources. 5389 * This is called in HBA error handling and driver removal. 5390 * @phba: Instance of driver private structure 5391 * @unload: indicate driver is unloading 5392 * 5393 * Free the OS and HW resources held by the driver 5394 **/ 5395 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload) 5396 { 5397 struct hwi_context_memory *phwi_context; 5398 struct hwi_controller *phwi_ctrlr; 5399 struct be_eq_obj *pbe_eq; 5400 unsigned int i; 5401 5402 if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state)) 5403 return; 5404 5405 phwi_ctrlr = phba->phwi_ctrlr; 5406 phwi_context = phwi_ctrlr->phwi_ctxt; 5407 hwi_disable_intr(phba); 5408 beiscsi_free_irqs(phba); 5409 pci_free_irq_vectors(phba->pcidev); 5410 5411 for (i = 0; i < phba->num_cpus; i++) { 5412 pbe_eq = &phwi_context->be_eq[i]; 5413 irq_poll_disable(&pbe_eq->iopoll); 5414 } 5415 cancel_delayed_work_sync(&phba->eqd_update); 5416 cancel_work_sync(&phba->boot_work); 5417 /* WQ might be running; cancel queued mcc_work if we are not exiting */ 5418 if (!unload && beiscsi_hba_in_error(phba)) { 5419 pbe_eq = &phwi_context->be_eq[i]; 5420 cancel_work_sync(&pbe_eq->mcc_work); 5421 } 5422 hwi_cleanup_port(phba); 5423 beiscsi_cleanup_port(phba); 5424 } 5425 5426 static void beiscsi_sess_work(struct work_struct *work) 5427 { 5428 struct beiscsi_hba *phba; 5429 5430 phba = container_of(work, struct beiscsi_hba, sess_work); 5431 /* 5432 * This work gets scheduled only in case of HBA error. 5433 * Old sessions are gone so need to be re-established. 5434 * iscsi_session_failure needs process context hence this work. 5435 */ 5436 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5437 } 5438 5439 static void beiscsi_recover_port(struct work_struct *work) 5440 { 5441 struct beiscsi_hba *phba; 5442 5443 phba = container_of(work, struct beiscsi_hba, recover_port.work); 5444 beiscsi_disable_port(phba, 0); 5445 beiscsi_enable_port(phba); 5446 } 5447 5448 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 5449 pci_channel_state_t state) 5450 { 5451 struct beiscsi_hba *phba = NULL; 5452 5453 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5454 set_bit(BEISCSI_HBA_PCI_ERR, &phba->state); 5455 5456 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5457 "BM_%d : EEH error detected\n"); 5458 5459 /* first stop UE detection when PCI error detected */ 5460 del_timer_sync(&phba->hw_check); 5461 cancel_delayed_work_sync(&phba->recover_port); 5462 5463 /* sessions are no longer valid, so first fail the sessions */ 5464 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5465 beiscsi_disable_port(phba, 0); 5466 5467 if (state == pci_channel_io_perm_failure) { 5468 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5469 "BM_%d : EEH : State PERM Failure\n"); 5470 return PCI_ERS_RESULT_DISCONNECT; 5471 } 5472 5473 pci_disable_device(pdev); 5474 5475 /* The error could cause the FW to trigger a flash debug dump. 
5476 * Resetting the card while flash dump is in progress 5477 * can cause it not to recover; wait for it to finish. 5478 * Wait only for first function as it is needed only once per 5479 * adapter. 5480 **/ 5481 if (pdev->devfn == 0) 5482 ssleep(30); 5483 5484 return PCI_ERS_RESULT_NEED_RESET; 5485 } 5486 5487 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) 5488 { 5489 struct beiscsi_hba *phba = NULL; 5490 int status = 0; 5491 5492 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5493 5494 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5495 "BM_%d : EEH Reset\n"); 5496 5497 status = pci_enable_device(pdev); 5498 if (status) 5499 return PCI_ERS_RESULT_DISCONNECT; 5500 5501 pci_set_master(pdev); 5502 pci_set_power_state(pdev, PCI_D0); 5503 pci_restore_state(pdev); 5504 5505 status = beiscsi_check_fw_rdy(phba); 5506 if (status) { 5507 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5508 "BM_%d : EEH Reset Completed\n"); 5509 } else { 5510 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5511 "BM_%d : EEH Reset Completion Failure\n"); 5512 return PCI_ERS_RESULT_DISCONNECT; 5513 } 5514 5515 pci_cleanup_aer_uncorrect_error_status(pdev); 5516 return PCI_ERS_RESULT_RECOVERED; 5517 } 5518 5519 static void beiscsi_eeh_resume(struct pci_dev *pdev) 5520 { 5521 struct beiscsi_hba *phba; 5522 int ret; 5523 5524 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5525 pci_save_state(pdev); 5526 5527 ret = beiscsi_enable_port(phba); 5528 if (ret) 5529 __beiscsi_log(phba, KERN_ERR, 5530 "BM_%d : AER EEH resume failed\n"); 5531 } 5532 5533 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5534 const struct pci_device_id *id) 5535 { 5536 struct hwi_context_memory *phwi_context; 5537 struct hwi_controller *phwi_ctrlr; 5538 struct beiscsi_hba *phba = NULL; 5539 struct be_eq_obj *pbe_eq; 5540 unsigned int s_handle; 5541 char wq_name[20]; 5542 int ret, i; 5543 5544 ret = beiscsi_enable_pci(pcidev); 5545 if (ret < 0) { 5546 dev_err(&pcidev->dev, 5547 "beiscsi_dev_probe - Failed to enable pci device\n"); 5548 return ret; 5549 } 5550 5551 phba = beiscsi_hba_alloc(pcidev); 5552 if (!phba) { 5553 dev_err(&pcidev->dev, 5554 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5555 ret = -ENOMEM; 5556 goto disable_pci; 5557 } 5558 5559 /* Enable EEH reporting */ 5560 ret = pci_enable_pcie_error_reporting(pcidev); 5561 if (ret) 5562 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5563 "BM_%d : PCIe Error Reporting " 5564 "Enabling Failed\n"); 5565 5566 pci_save_state(pcidev); 5567 5568 /* Initialize Driver configuration Parameters */ 5569 beiscsi_hba_attrs_init(phba); 5570 5571 phba->mac_addr_set = false; 5572 5573 switch (pcidev->device) { 5574 case BE_DEVICE_ID1: 5575 case OC_DEVICE_ID1: 5576 case OC_DEVICE_ID2: 5577 phba->generation = BE_GEN2; 5578 phba->iotask_fn = beiscsi_iotask; 5579 dev_warn(&pcidev->dev, 5580 "Obsolete/Unsupported BE2 Adapter Family\n"); 5581 break; 5582 case BE_DEVICE_ID2: 5583 case OC_DEVICE_ID3: 5584 phba->generation = BE_GEN3; 5585 phba->iotask_fn = beiscsi_iotask; 5586 break; 5587 case OC_SKH_ID1: 5588 phba->generation = BE_GEN4; 5589 phba->iotask_fn = beiscsi_iotask_v2; 5590 break; 5591 default: 5592 phba->generation = 0; 5593 } 5594 5595 ret = be_ctrl_init(phba, pcidev); 5596 if (ret) { 5597 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5598 "BM_%d : be_ctrl_init failed\n"); 5599 goto free_hba; 5600 } 5601 5602 ret = beiscsi_init_sliport(phba); 5603 if (ret) 5604 goto free_hba; 5605 5606 spin_lock_init(&phba->io_sgl_lock); 5607
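/* the mgmt SGL and async PDU pools take their own locks below */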
	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->async_pdu_lock);
	ret = beiscsi_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Error getting fw config\n");
		goto free_port;
	}
	beiscsi_get_port_name(&phba->ctrl, phba);
	beiscsi_get_params(phba);
	beiscsi_set_host_data(phba);
	beiscsi_set_uer_feature(phba);

	be2iscsi_enable_msix(phba);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : num_cpus = %d\n",
		    phba->num_cpus);

	phba->shost->max_id = phba->params.cxns_per_ctrl;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_get_memory(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : alloc host mem failed\n");
		goto free_port;
	}

	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : init port failed\n");
		beiscsi_free_mem(phba);
		goto free_port;
	}

	/* MCC tags run 1..MAX_MCC_CMD; index 0 means "no tag" */
	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
		       sizeof(struct be_dma_mem));
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
	if (!phba->wq) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to allocate work queue\n");
		ret = -ENOMEM;
		goto free_twq;
	}

	INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
	}

	/* with MSI-X, the EQ past the I/O EQs handles MCC; else EQ 0 */
	i = (phba->pcidev->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to beiscsi_init_irqs\n");
		goto disable_iopoll;
	}
	hwi_enable_intr(phba);

	ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
	if (ret)
		goto free_irqs;

	/* set online bit after port is operational */
	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
	__beiscsi_log(phba, KERN_INFO,
		      "BM_%d : port online: 0x%lx\n", phba->state);

	INIT_WORK(&phba->boot_work, beiscsi_boot_work);
	ret = beiscsi_boot_get_shandle(phba, &s_handle);
	if (ret > 0) {
		beiscsi_start_boot_work(phba, s_handle);
		/*
		 * Set this bit after starting the work to let
		 * probe handle it first.
		 * An ASYNC event can schedule this work too.
		 */
		set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
	}

	beiscsi_iface_create_default(phba);
	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));

	INIT_WORK(&phba->sess_work, beiscsi_sess_work);
	INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
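	/*
	 * sess_work and recover_port are scheduled only from the error
	 * handling paths: sess_work fails the stale sessions from process
	 * context, and recover_port cycles the port through
	 * beiscsi_disable_port()/beiscsi_enable_port().
	 */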
	/*
	 * Start UE detection here. A UE before this point would stall
	 * the probe and eventually fail it.
	 */
	timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_irqs:
	hwi_disable_intr(phba);
	beiscsi_free_irqs(phba);
disable_iopoll:
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	destroy_workqueue(phba->wq);
free_twq:
	hwi_cleanup_port(phba);
	beiscsi_cleanup_port(phba);
	beiscsi_free_mem(phba);
free_port:
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
free_hba:
	pci_disable_msix(phba->pcidev);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_set_drvdata(pcidev, NULL);
disable_pci:
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
	return ret;
}

static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	/* first stop UE detection before unloading */
	del_timer_sync(&phba->hw_check);
	cancel_delayed_work_sync(&phba->recover_port);
	cancel_work_sync(&phba->sess_work);

	beiscsi_iface_destroy_default(phba);
	iscsi_host_remove(phba->shost);
	beiscsi_disable_port(phba, 1);

	/* after cancelling boot_work */
	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);

	/* free all resources */
	destroy_workqueue(phba->wq);
	beiscsi_free_mem(phba);

	/* ctrl uninit */
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);

	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_disable_pcie_error_reporting(pcidev);
	pci_set_drvdata(pcidev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
}

static struct pci_error_handlers beiscsi_eeh_handlers = {
	.error_detected = beiscsi_eeh_err_detected,
	.slot_reset = beiscsi_eeh_reset,
	.resume = beiscsi_eeh_resume,
};
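/*
 * iSCSI transport template: session/connection setup, parameter
 * handling and endpoint management are driver-specific (hardware
 * offload), while PDU transmission, connection stop and recovery
 * timeout fall through to the libiscsi defaults (iscsi_conn_send_pdu,
 * iscsi_conn_stop, iscsi_session_recovery_timedout).
 */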
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = beiscsi_attr_is_visible,
	.set_iface_param = beiscsi_iface_set_param,
	.get_iface_param = beiscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};

static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table,
	.err_handler = &beiscsi_eeh_handlers
};

static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
		iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi transport.\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
	       &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);
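/*
 * Note the registration order above: the iSCSI transport is registered
 * before the PCI driver so that it already exists when probe runs for a
 * device, and beiscsi_module_exit() tears the two down in reverse order.
 */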