1 /** 2 * Copyright (C) 2005 - 2016 Broadcom 3 * All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License version 2 7 * as published by the Free Software Foundation. The full GNU General 8 * Public License is included in this distribution in the file called COPYING. 9 * 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com) 11 * 12 * Contact Information: 13 * linux-drivers@broadcom.com 14 * 15 * Emulex 16 * 3333 Susan Street 17 * Costa Mesa, CA 92626 18 */ 19 20 #include <linux/reboot.h> 21 #include <linux/delay.h> 22 #include <linux/slab.h> 23 #include <linux/interrupt.h> 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/string.h> 27 #include <linux/kernel.h> 28 #include <linux/semaphore.h> 29 #include <linux/iscsi_boot_sysfs.h> 30 #include <linux/module.h> 31 #include <linux/bsg-lib.h> 32 #include <linux/irq_poll.h> 33 34 #include <scsi/libiscsi.h> 35 #include <scsi/scsi_bsg_iscsi.h> 36 #include <scsi/scsi_netlink.h> 37 #include <scsi/scsi_transport_iscsi.h> 38 #include <scsi/scsi_transport.h> 39 #include <scsi/scsi_cmnd.h> 40 #include <scsi/scsi_device.h> 41 #include <scsi/scsi_host.h> 42 #include <scsi/scsi.h> 43 #include "be_main.h" 44 #include "be_iscsi.h" 45 #include "be_mgmt.h" 46 #include "be_cmds.h" 47 48 static unsigned int be_iopoll_budget = 10; 49 static unsigned int be_max_phys_size = 64; 50 static unsigned int enable_msix = 1; 51 52 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 53 MODULE_VERSION(BUILD_STR); 54 MODULE_AUTHOR("Emulex Corporation"); 55 MODULE_LICENSE("GPL"); 56 module_param(be_iopoll_budget, int, 0); 57 module_param(enable_msix, int, 0); 58 module_param(be_max_phys_size, uint, S_IRUGO); 59 MODULE_PARM_DESC(be_max_phys_size, 60 "Maximum Size (In Kilobytes) of physically contiguous " 61 "memory that can be allocated. Range is 16 - 128"); 62 63 #define beiscsi_disp_param(_name)\ 64 static ssize_t \ 65 beiscsi_##_name##_disp(struct device *dev,\ 66 struct device_attribute *attrib, char *buf) \ 67 { \ 68 struct Scsi_Host *shost = class_to_shost(dev);\ 69 struct beiscsi_hba *phba = iscsi_host_priv(shost); \ 70 uint32_t param_val = 0; \ 71 param_val = phba->attr_##_name;\ 72 return snprintf(buf, PAGE_SIZE, "%d\n",\ 73 phba->attr_##_name);\ 74 } 75 76 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ 77 static int \ 78 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ 79 {\ 80 if (val >= _minval && val <= _maxval) {\ 81 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 82 "BA_%d : beiscsi_"#_name" updated "\ 83 "from 0x%x ==> 0x%x\n",\ 84 phba->attr_##_name, val); \ 85 phba->attr_##_name = val;\ 86 return 0;\ 87 } \ 88 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ 89 "BA_%d beiscsi_"#_name" attribute "\ 90 "cannot be updated to 0x%x, "\ 91 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 92 return -EINVAL;\ 93 } 94 95 #define beiscsi_store_param(_name) \ 96 static ssize_t \ 97 beiscsi_##_name##_store(struct device *dev,\ 98 struct device_attribute *attr, const char *buf,\ 99 size_t count) \ 100 { \ 101 struct Scsi_Host *shost = class_to_shost(dev);\ 102 struct beiscsi_hba *phba = iscsi_host_priv(shost);\ 103 uint32_t param_val = 0;\ 104 if (!isdigit(buf[0]))\ 105 return -EINVAL;\ 106 if (sscanf(buf, "%i", ¶m_val) != 1)\ 107 return -EINVAL;\ 108 if (beiscsi_##_name##_change(phba, param_val) == 0) \ 109 return strlen(buf);\ 110 else \ 111 return -EINVAL;\ 112 } 113 114 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ 115 static int \ 116 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ 117 { \ 118 if (val >= _minval && val <= _maxval) {\ 119 phba->attr_##_name = val;\ 120 return 0;\ 121 } \ 122 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 123 "BA_%d beiscsi_"#_name" attribute " \ 124 "cannot be updated to 0x%x, "\ 125 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 126 phba->attr_##_name = _defval;\ 127 return -EINVAL;\ 128 } 129 130 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ 131 static uint beiscsi_##_name = _defval;\ 132 module_param(beiscsi_##_name, uint, S_IRUGO);\ 133 MODULE_PARM_DESC(beiscsi_##_name, _descp);\ 134 beiscsi_disp_param(_name)\ 135 beiscsi_change_param(_name, _minval, _maxval, _defval)\ 136 beiscsi_store_param(_name)\ 137 beiscsi_init_param(_name, _minval, _maxval, _defval)\ 138 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ 139 beiscsi_##_name##_disp, beiscsi_##_name##_store) 140 141 /* 142 * When new log level added update the 143 * the MAX allowed value for log_enable 144 */ 145 BEISCSI_RW_ATTR(log_enable, 0x00, 146 0xFF, 0x00, "Enable logging Bit Mask\n" 147 "\t\t\t\tInitialization Events : 0x01\n" 148 "\t\t\t\tMailbox Events : 0x02\n" 149 "\t\t\t\tMiscellaneous Events : 0x04\n" 150 "\t\t\t\tError Handling : 0x08\n" 151 "\t\t\t\tIO Path Events : 0x10\n" 152 "\t\t\t\tConfiguration Path : 0x20\n" 153 "\t\t\t\tiSCSI Protocol : 0x40\n"); 154 155 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 156 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 157 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); 158 DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); 159 DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, 160 beiscsi_active_session_disp, NULL); 161 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, 162 beiscsi_free_session_disp, NULL); 163 struct device_attribute *beiscsi_attrs[] = { 164 &dev_attr_beiscsi_log_enable, 165 &dev_attr_beiscsi_drvr_ver, 166 &dev_attr_beiscsi_adapter_family, 167 &dev_attr_beiscsi_fw_ver, 168 &dev_attr_beiscsi_active_session_count, 169 &dev_attr_beiscsi_free_session_count, 170 &dev_attr_beiscsi_phys_port, 171 NULL, 172 }; 173 174 static char const *cqe_desc[] = { 175 "RESERVED_DESC", 176 "SOL_CMD_COMPLETE", 177 "SOL_CMD_KILLED_DATA_DIGEST_ERR", 178 "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL", 179 "CXN_KILLED_BURST_LEN_MISMATCH", 180 "CXN_KILLED_AHS_RCVD", 181 "CXN_KILLED_HDR_DIGEST_ERR", 182 "CXN_KILLED_UNKNOWN_HDR", 183 "CXN_KILLED_STALE_ITT_TTT_RCVD", 184 "CXN_KILLED_INVALID_ITT_TTT_RCVD", 185 "CXN_KILLED_RST_RCVD", 186 "CXN_KILLED_TIMED_OUT", 187 "CXN_KILLED_RST_SENT", 188 "CXN_KILLED_FIN_RCVD", 189 "CXN_KILLED_BAD_UNSOL_PDU_RCVD", 190 "CXN_KILLED_BAD_WRB_INDEX_ERROR", 191 "CXN_KILLED_OVER_RUN_RESIDUAL", 192 "CXN_KILLED_UNDER_RUN_RESIDUAL", 193 "CMD_KILLED_INVALID_STATSN_RCVD", 194 "CMD_KILLED_INVALID_R2T_RCVD", 195 "CMD_CXN_KILLED_LUN_INVALID", 196 "CMD_CXN_KILLED_ICD_INVALID", 197 "CMD_CXN_KILLED_ITT_INVALID", 198 "CMD_CXN_KILLED_SEQ_OUTOFORDER", 199 "CMD_CXN_KILLED_INVALID_DATASN_RCVD", 200 "CXN_INVALIDATE_NOTIFY", 201 "CXN_INVALIDATE_INDEX_NOTIFY", 202 "CMD_INVALIDATED_NOTIFY", 203 "UNSOL_HDR_NOTIFY", 204 "UNSOL_DATA_NOTIFY", 205 "UNSOL_DATA_DIGEST_ERROR_NOTIFY", 206 "DRIVERMSG_NOTIFY", 207 "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN", 208 "SOL_CMD_KILLED_DIF_ERR", 209 "CXN_KILLED_SYN_RCVD", 210 "CXN_KILLED_IMM_DATA_RCVD" 211 }; 212 213 static int beiscsi_slave_configure(struct scsi_device *sdev) 214 { 215 blk_queue_max_segment_size(sdev->request_queue, 65536); 216 return 0; 217 } 218 219 static int beiscsi_eh_abort(struct scsi_cmnd *sc) 220 { 221 struct iscsi_cls_session *cls_session; 222 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr; 223 struct beiscsi_io_task *aborted_io_task; 224 struct iscsi_conn *conn; 225 struct beiscsi_conn *beiscsi_conn; 226 struct beiscsi_hba *phba; 227 struct iscsi_session *session; 228 struct invalidate_command_table *inv_tbl; 229 struct be_dma_mem nonemb_cmd; 230 unsigned int cid, tag, num_invalidate; 231 int rc; 232 233 cls_session = starget_to_session(scsi_target(sc->device)); 234 session = cls_session->dd_data; 235 236 spin_lock_bh(&session->frwd_lock); 237 if (!aborted_task || !aborted_task->sc) { 238 /* we raced */ 239 spin_unlock_bh(&session->frwd_lock); 240 return SUCCESS; 241 } 242 243 aborted_io_task = aborted_task->dd_data; 244 if (!aborted_io_task->scsi_cmnd) { 245 /* raced or invalid command */ 246 spin_unlock_bh(&session->frwd_lock); 247 return SUCCESS; 248 } 249 spin_unlock_bh(&session->frwd_lock); 250 /* Invalidate WRB Posted for this Task */ 251 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 252 aborted_io_task->pwrb_handle->pwrb, 253 1); 254 255 conn = aborted_task->conn; 256 beiscsi_conn = conn->dd_data; 257 phba = beiscsi_conn->phba; 258 259 /* invalidate iocb */ 260 cid = beiscsi_conn->beiscsi_conn_cid; 261 inv_tbl = phba->inv_tbl; 262 memset(inv_tbl, 0x0, sizeof(*inv_tbl)); 263 inv_tbl->cid = cid; 264 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; 265 num_invalidate = 1; 266 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 267 sizeof(struct invalidate_commands_params_in), 268 &nonemb_cmd.dma); 269 if (nonemb_cmd.va == NULL) { 270 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 271 "BM_%d : Failed to allocate memory for" 272 "mgmt_invalidate_icds\n"); 273 return FAILED; 274 } 275 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 276 277 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 278 cid, &nonemb_cmd); 279 if (!tag) { 280 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 281 "BM_%d : mgmt_invalidate_icds could not be" 282 "submitted\n"); 283 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 284 nonemb_cmd.va, nonemb_cmd.dma); 285 286 return FAILED; 287 } 288 289 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); 290 if (rc != -EBUSY) 291 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 292 nonemb_cmd.va, nonemb_cmd.dma); 293 294 return iscsi_eh_abort(sc); 295 } 296 297 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) 298 { 299 struct iscsi_task *abrt_task; 300 struct beiscsi_io_task *abrt_io_task; 301 struct iscsi_conn *conn; 302 struct beiscsi_conn *beiscsi_conn; 303 struct beiscsi_hba *phba; 304 struct iscsi_session *session; 305 struct iscsi_cls_session *cls_session; 306 struct invalidate_command_table *inv_tbl; 307 struct be_dma_mem nonemb_cmd; 308 unsigned int cid, tag, i, num_invalidate; 309 int rc; 310 311 /* invalidate iocbs */ 312 cls_session = starget_to_session(scsi_target(sc->device)); 313 session = cls_session->dd_data; 314 spin_lock_bh(&session->frwd_lock); 315 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { 316 spin_unlock_bh(&session->frwd_lock); 317 return FAILED; 318 } 319 conn = session->leadconn; 320 beiscsi_conn = conn->dd_data; 321 phba = beiscsi_conn->phba; 322 cid = beiscsi_conn->beiscsi_conn_cid; 323 inv_tbl = phba->inv_tbl; 324 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN); 325 num_invalidate = 0; 326 for (i = 0; i < conn->session->cmds_max; i++) { 327 abrt_task = conn->session->cmds[i]; 328 abrt_io_task = abrt_task->dd_data; 329 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) 330 continue; 331 332 if (sc->device->lun != abrt_task->sc->device->lun) 333 continue; 334 335 /* Invalidate WRB Posted for this Task */ 336 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 337 abrt_io_task->pwrb_handle->pwrb, 338 1); 339 340 inv_tbl->cid = cid; 341 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; 342 num_invalidate++; 343 inv_tbl++; 344 } 345 spin_unlock_bh(&session->frwd_lock); 346 inv_tbl = phba->inv_tbl; 347 348 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 349 sizeof(struct invalidate_commands_params_in), 350 &nonemb_cmd.dma); 351 if (nonemb_cmd.va == NULL) { 352 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 353 "BM_%d : Failed to allocate memory for" 354 "mgmt_invalidate_icds\n"); 355 return FAILED; 356 } 357 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 358 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 359 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 360 cid, &nonemb_cmd); 361 if (!tag) { 362 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 363 "BM_%d : mgmt_invalidate_icds could not be" 364 " submitted\n"); 365 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 366 nonemb_cmd.va, nonemb_cmd.dma); 367 return FAILED; 368 } 369 370 rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd); 371 if (rc != -EBUSY) 372 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 373 nonemb_cmd.va, nonemb_cmd.dma); 374 return iscsi_eh_device_reset(sc); 375 } 376 377 /*------------------- PCI Driver operations and data ----------------- */ 378 static const struct pci_device_id beiscsi_pci_id_table[] = { 379 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 380 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 381 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 382 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 383 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, 384 { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) }, 385 { 0 } 386 }; 387 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 388 389 390 static struct scsi_host_template beiscsi_sht = { 391 .module = THIS_MODULE, 392 .name = "Emulex 10Gbe open-iscsi Initiator Driver", 393 .proc_name = DRV_NAME, 394 .queuecommand = iscsi_queuecommand, 395 .change_queue_depth = scsi_change_queue_depth, 396 .slave_configure = beiscsi_slave_configure, 397 .target_alloc = iscsi_target_alloc, 398 .eh_abort_handler = beiscsi_eh_abort, 399 .eh_device_reset_handler = beiscsi_eh_device_reset, 400 .eh_target_reset_handler = iscsi_eh_session_reset, 401 .shost_attrs = beiscsi_attrs, 402 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 403 .can_queue = BE2_IO_DEPTH, 404 .this_id = -1, 405 .max_sectors = BEISCSI_MAX_SECTORS, 406 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 407 .use_clustering = ENABLE_CLUSTERING, 408 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, 409 .track_queue_depth = 1, 410 }; 411 412 static struct scsi_transport_template *beiscsi_scsi_transport; 413 414 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) 415 { 416 struct beiscsi_hba *phba; 417 struct Scsi_Host *shost; 418 419 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); 420 if (!shost) { 421 dev_err(&pcidev->dev, 422 "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); 423 return NULL; 424 } 425 shost->max_id = BE2_MAX_SESSIONS; 426 shost->max_channel = 0; 427 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; 428 shost->max_lun = BEISCSI_NUM_MAX_LUN; 429 shost->transportt = beiscsi_scsi_transport; 430 phba = iscsi_host_priv(shost); 431 memset(phba, 0, sizeof(*phba)); 432 phba->shost = shost; 433 phba->pcidev = pci_dev_get(pcidev); 434 pci_set_drvdata(pcidev, phba); 435 phba->interface_handle = 0xFFFFFFFF; 436 437 return phba; 438 } 439 440 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) 441 { 442 if (phba->csr_va) { 443 iounmap(phba->csr_va); 444 phba->csr_va = NULL; 445 } 446 if (phba->db_va) { 447 iounmap(phba->db_va); 448 phba->db_va = NULL; 449 } 450 if (phba->pci_va) { 451 iounmap(phba->pci_va); 452 phba->pci_va = NULL; 453 } 454 } 455 456 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, 457 struct pci_dev *pcidev) 458 { 459 u8 __iomem *addr; 460 int pcicfg_reg; 461 462 addr = ioremap_nocache(pci_resource_start(pcidev, 2), 463 pci_resource_len(pcidev, 2)); 464 if (addr == NULL) 465 return -ENOMEM; 466 phba->ctrl.csr = addr; 467 phba->csr_va = addr; 468 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2); 469 470 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); 471 if (addr == NULL) 472 goto pci_map_err; 473 phba->ctrl.db = addr; 474 phba->db_va = addr; 475 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4); 476 477 if (phba->generation == BE_GEN2) 478 pcicfg_reg = 1; 479 else 480 pcicfg_reg = 0; 481 482 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), 483 pci_resource_len(pcidev, pcicfg_reg)); 484 485 if (addr == NULL) 486 goto pci_map_err; 487 phba->ctrl.pcicfg = addr; 488 phba->pci_va = addr; 489 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg); 490 return 0; 491 492 pci_map_err: 493 beiscsi_unmap_pci_function(phba); 494 return -ENOMEM; 495 } 496 497 static int beiscsi_enable_pci(struct pci_dev *pcidev) 498 { 499 int ret; 500 501 ret = pci_enable_device(pcidev); 502 if (ret) { 503 dev_err(&pcidev->dev, 504 "beiscsi_enable_pci - enable device failed\n"); 505 return ret; 506 } 507 508 ret = pci_request_regions(pcidev, DRV_NAME); 509 if (ret) { 510 dev_err(&pcidev->dev, 511 "beiscsi_enable_pci - request region failed\n"); 512 goto pci_dev_disable; 513 } 514 515 pci_set_master(pcidev); 516 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); 517 if (ret) { 518 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); 519 if (ret) { 520 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 521 goto pci_region_release; 522 } else { 523 ret = pci_set_consistent_dma_mask(pcidev, 524 DMA_BIT_MASK(32)); 525 } 526 } else { 527 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); 528 if (ret) { 529 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 530 goto pci_region_release; 531 } 532 } 533 return 0; 534 535 pci_region_release: 536 pci_release_regions(pcidev); 537 pci_dev_disable: 538 pci_disable_device(pcidev); 539 540 return ret; 541 } 542 543 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) 544 { 545 struct be_ctrl_info *ctrl = &phba->ctrl; 546 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; 547 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; 548 int status = 0; 549 550 ctrl->pdev = pdev; 551 status = beiscsi_map_pci_bars(phba, pdev); 552 if (status) 553 return status; 554 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 555 mbox_mem_alloc->va = pci_alloc_consistent(pdev, 556 mbox_mem_alloc->size, 557 &mbox_mem_alloc->dma); 558 if (!mbox_mem_alloc->va) { 559 beiscsi_unmap_pci_function(phba); 560 return -ENOMEM; 561 } 562 563 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 564 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 565 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 566 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 567 mutex_init(&ctrl->mbox_lock); 568 spin_lock_init(&phba->ctrl.mcc_lock); 569 570 return status; 571 } 572 573 /** 574 * beiscsi_get_params()- Set the config paramters 575 * @phba: ptr device priv structure 576 **/ 577 static void beiscsi_get_params(struct beiscsi_hba *phba) 578 { 579 uint32_t total_cid_count = 0; 580 uint32_t total_icd_count = 0; 581 uint8_t ulp_num = 0; 582 583 total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + 584 BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); 585 586 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 587 uint32_t align_mask = 0; 588 uint32_t icd_post_per_page = 0; 589 uint32_t icd_count_unavailable = 0; 590 uint32_t icd_start = 0, icd_count = 0; 591 uint32_t icd_start_align = 0, icd_count_align = 0; 592 593 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 594 icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 595 icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 596 597 /* Get ICD count that can be posted on each page */ 598 icd_post_per_page = (PAGE_SIZE / (BE2_SGE * 599 sizeof(struct iscsi_sge))); 600 align_mask = (icd_post_per_page - 1); 601 602 /* Check if icd_start is aligned ICD per page posting */ 603 if (icd_start % icd_post_per_page) { 604 icd_start_align = ((icd_start + 605 icd_post_per_page) & 606 ~(align_mask)); 607 phba->fw_config. 608 iscsi_icd_start[ulp_num] = 609 icd_start_align; 610 } 611 612 icd_count_align = (icd_count & ~align_mask); 613 614 /* ICD discarded in the process of alignment */ 615 if (icd_start_align) 616 icd_count_unavailable = ((icd_start_align - 617 icd_start) + 618 (icd_count - 619 icd_count_align)); 620 621 /* Updated ICD count available */ 622 phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - 623 icd_count_unavailable); 624 625 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 626 "BM_%d : Aligned ICD values\n" 627 "\t ICD Start : %d\n" 628 "\t ICD Count : %d\n" 629 "\t ICD Discarded : %d\n", 630 phba->fw_config. 631 iscsi_icd_start[ulp_num], 632 phba->fw_config. 633 iscsi_icd_count[ulp_num], 634 icd_count_unavailable); 635 break; 636 } 637 } 638 639 total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 640 phba->params.ios_per_ctrl = (total_icd_count - 641 (total_cid_count + 642 BE2_TMFS + BE2_NOPOUT_REQ)); 643 phba->params.cxns_per_ctrl = total_cid_count; 644 phba->params.asyncpdus_per_ctrl = total_cid_count; 645 phba->params.icds_per_ctrl = total_icd_count; 646 phba->params.num_sge_per_io = BE2_SGE; 647 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 648 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 649 phba->params.eq_timer = 64; 650 phba->params.num_eq_entries = 1024; 651 phba->params.num_cq_entries = 1024; 652 phba->params.wrbs_per_cxn = 256; 653 } 654 655 static void hwi_ring_eq_db(struct beiscsi_hba *phba, 656 unsigned int id, unsigned int clr_interrupt, 657 unsigned int num_processed, 658 unsigned char rearm, unsigned char event) 659 { 660 u32 val = 0; 661 662 if (rearm) 663 val |= 1 << DB_EQ_REARM_SHIFT; 664 if (clr_interrupt) 665 val |= 1 << DB_EQ_CLR_SHIFT; 666 if (event) 667 val |= 1 << DB_EQ_EVNT_SHIFT; 668 669 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; 670 /* Setting lower order EQ_ID Bits */ 671 val |= (id & DB_EQ_RING_ID_LOW_MASK); 672 673 /* Setting Higher order EQ_ID Bits */ 674 val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & 675 DB_EQ_RING_ID_HIGH_MASK) 676 << DB_EQ_HIGH_SET_SHIFT); 677 678 iowrite32(val, phba->db_va + DB_EQ_OFFSET); 679 } 680 681 /** 682 * be_isr_mcc - The isr routine of the driver. 683 * @irq: Not used 684 * @dev_id: Pointer to host adapter structure 685 */ 686 static irqreturn_t be_isr_mcc(int irq, void *dev_id) 687 { 688 struct beiscsi_hba *phba; 689 struct be_eq_entry *eqe; 690 struct be_queue_info *eq; 691 struct be_queue_info *mcc; 692 unsigned int mcc_events; 693 struct be_eq_obj *pbe_eq; 694 695 pbe_eq = dev_id; 696 eq = &pbe_eq->q; 697 phba = pbe_eq->phba; 698 mcc = &phba->ctrl.mcc_obj.cq; 699 eqe = queue_tail_node(eq); 700 701 mcc_events = 0; 702 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 703 & EQE_VALID_MASK) { 704 if (((eqe->dw[offsetof(struct amap_eq_entry, 705 resource_id) / 32] & 706 EQE_RESID_MASK) >> 16) == mcc->id) { 707 mcc_events++; 708 } 709 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 710 queue_tail_inc(eq); 711 eqe = queue_tail_node(eq); 712 } 713 714 if (mcc_events) { 715 queue_work(phba->wq, &pbe_eq->mcc_work); 716 hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1); 717 } 718 return IRQ_HANDLED; 719 } 720 721 /** 722 * be_isr_msix - The isr routine of the driver. 723 * @irq: Not used 724 * @dev_id: Pointer to host adapter structure 725 */ 726 static irqreturn_t be_isr_msix(int irq, void *dev_id) 727 { 728 struct beiscsi_hba *phba; 729 struct be_queue_info *eq; 730 struct be_eq_obj *pbe_eq; 731 732 pbe_eq = dev_id; 733 eq = &pbe_eq->q; 734 735 phba = pbe_eq->phba; 736 /* disable interrupt till iopoll completes */ 737 hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1); 738 irq_poll_sched(&pbe_eq->iopoll); 739 740 return IRQ_HANDLED; 741 } 742 743 /** 744 * be_isr - The isr routine of the driver. 745 * @irq: Not used 746 * @dev_id: Pointer to host adapter structure 747 */ 748 static irqreturn_t be_isr(int irq, void *dev_id) 749 { 750 struct beiscsi_hba *phba; 751 struct hwi_controller *phwi_ctrlr; 752 struct hwi_context_memory *phwi_context; 753 struct be_eq_entry *eqe; 754 struct be_queue_info *eq; 755 struct be_queue_info *mcc; 756 unsigned int mcc_events, io_events; 757 struct be_ctrl_info *ctrl; 758 struct be_eq_obj *pbe_eq; 759 int isr, rearm; 760 761 phba = dev_id; 762 ctrl = &phba->ctrl; 763 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + 764 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); 765 if (!isr) 766 return IRQ_NONE; 767 768 phwi_ctrlr = phba->phwi_ctrlr; 769 phwi_context = phwi_ctrlr->phwi_ctxt; 770 pbe_eq = &phwi_context->be_eq[0]; 771 772 eq = &phwi_context->be_eq[0].q; 773 mcc = &phba->ctrl.mcc_obj.cq; 774 eqe = queue_tail_node(eq); 775 776 io_events = 0; 777 mcc_events = 0; 778 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 779 & EQE_VALID_MASK) { 780 if (((eqe->dw[offsetof(struct amap_eq_entry, 781 resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id) 782 mcc_events++; 783 else 784 io_events++; 785 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 786 queue_tail_inc(eq); 787 eqe = queue_tail_node(eq); 788 } 789 if (!io_events && !mcc_events) 790 return IRQ_NONE; 791 792 /* no need to rearm if interrupt is only for IOs */ 793 rearm = 0; 794 if (mcc_events) { 795 queue_work(phba->wq, &pbe_eq->mcc_work); 796 /* rearm for MCCQ */ 797 rearm = 1; 798 } 799 if (io_events) 800 irq_poll_sched(&pbe_eq->iopoll); 801 hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1); 802 return IRQ_HANDLED; 803 } 804 805 806 static int beiscsi_init_irqs(struct beiscsi_hba *phba) 807 { 808 struct pci_dev *pcidev = phba->pcidev; 809 struct hwi_controller *phwi_ctrlr; 810 struct hwi_context_memory *phwi_context; 811 int ret, msix_vec, i, j; 812 813 phwi_ctrlr = phba->phwi_ctrlr; 814 phwi_context = phwi_ctrlr->phwi_ctxt; 815 816 if (phba->msix_enabled) { 817 for (i = 0; i < phba->num_cpus; i++) { 818 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, 819 GFP_KERNEL); 820 if (!phba->msi_name[i]) { 821 ret = -ENOMEM; 822 goto free_msix_irqs; 823 } 824 825 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x", 826 phba->shost->host_no, i); 827 msix_vec = phba->msix_entries[i].vector; 828 ret = request_irq(msix_vec, be_isr_msix, 0, 829 phba->msi_name[i], 830 &phwi_context->be_eq[i]); 831 if (ret) { 832 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 833 "BM_%d : beiscsi_init_irqs-Failed to" 834 "register msix for i = %d\n", 835 i); 836 kfree(phba->msi_name[i]); 837 goto free_msix_irqs; 838 } 839 } 840 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL); 841 if (!phba->msi_name[i]) { 842 ret = -ENOMEM; 843 goto free_msix_irqs; 844 } 845 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x", 846 phba->shost->host_no); 847 msix_vec = phba->msix_entries[i].vector; 848 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i], 849 &phwi_context->be_eq[i]); 850 if (ret) { 851 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT , 852 "BM_%d : beiscsi_init_irqs-" 853 "Failed to register beiscsi_msix_mcc\n"); 854 kfree(phba->msi_name[i]); 855 goto free_msix_irqs; 856 } 857 858 } else { 859 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, 860 "beiscsi", phba); 861 if (ret) { 862 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 863 "BM_%d : beiscsi_init_irqs-" 864 "Failed to register irq\\n"); 865 return ret; 866 } 867 } 868 return 0; 869 free_msix_irqs: 870 for (j = i - 1; j >= 0; j--) { 871 kfree(phba->msi_name[j]); 872 msix_vec = phba->msix_entries[j].vector; 873 free_irq(msix_vec, &phwi_context->be_eq[j]); 874 } 875 return ret; 876 } 877 878 void hwi_ring_cq_db(struct beiscsi_hba *phba, 879 unsigned int id, unsigned int num_processed, 880 unsigned char rearm) 881 { 882 u32 val = 0; 883 884 if (rearm) 885 val |= 1 << DB_CQ_REARM_SHIFT; 886 887 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; 888 889 /* Setting lower order CQ_ID Bits */ 890 val |= (id & DB_CQ_RING_ID_LOW_MASK); 891 892 /* Setting Higher order CQ_ID Bits */ 893 val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & 894 DB_CQ_RING_ID_HIGH_MASK) 895 << DB_CQ_HIGH_SET_SHIFT); 896 897 iowrite32(val, phba->db_va + DB_CQ_OFFSET); 898 } 899 900 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) 901 { 902 struct sgl_handle *psgl_handle; 903 904 spin_lock_bh(&phba->io_sgl_lock); 905 if (phba->io_sgl_hndl_avbl) { 906 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 907 "BM_%d : In alloc_io_sgl_handle," 908 " io_sgl_alloc_index=%d\n", 909 phba->io_sgl_alloc_index); 910 911 psgl_handle = phba->io_sgl_hndl_base[phba-> 912 io_sgl_alloc_index]; 913 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; 914 phba->io_sgl_hndl_avbl--; 915 if (phba->io_sgl_alloc_index == (phba->params. 916 ios_per_ctrl - 1)) 917 phba->io_sgl_alloc_index = 0; 918 else 919 phba->io_sgl_alloc_index++; 920 } else 921 psgl_handle = NULL; 922 spin_unlock_bh(&phba->io_sgl_lock); 923 return psgl_handle; 924 } 925 926 static void 927 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 928 { 929 spin_lock_bh(&phba->io_sgl_lock); 930 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 931 "BM_%d : In free_,io_sgl_free_index=%d\n", 932 phba->io_sgl_free_index); 933 934 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { 935 /* 936 * this can happen if clean_task is called on a task that 937 * failed in xmit_task or alloc_pdu. 938 */ 939 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 940 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d," 941 "value there=%p\n", phba->io_sgl_free_index, 942 phba->io_sgl_hndl_base 943 [phba->io_sgl_free_index]); 944 spin_unlock_bh(&phba->io_sgl_lock); 945 return; 946 } 947 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; 948 phba->io_sgl_hndl_avbl++; 949 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1)) 950 phba->io_sgl_free_index = 0; 951 else 952 phba->io_sgl_free_index++; 953 spin_unlock_bh(&phba->io_sgl_lock); 954 } 955 956 static inline struct wrb_handle * 957 beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context, 958 unsigned int wrbs_per_cxn) 959 { 960 struct wrb_handle *pwrb_handle; 961 962 spin_lock_bh(&pwrb_context->wrb_lock); 963 pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; 964 pwrb_context->wrb_handles_available--; 965 if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) 966 pwrb_context->alloc_index = 0; 967 else 968 pwrb_context->alloc_index++; 969 spin_unlock_bh(&pwrb_context->wrb_lock); 970 971 if (pwrb_handle) 972 memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb)); 973 974 return pwrb_handle; 975 } 976 977 /** 978 * alloc_wrb_handle - To allocate a wrb handle 979 * @phba: The hba pointer 980 * @cid: The cid to use for allocation 981 * @pwrb_context: ptr to ptr to wrb context 982 * 983 * This happens under session_lock until submission to chip 984 */ 985 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, 986 struct hwi_wrb_context **pcontext) 987 { 988 struct hwi_wrb_context *pwrb_context; 989 struct hwi_controller *phwi_ctrlr; 990 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); 991 992 phwi_ctrlr = phba->phwi_ctrlr; 993 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 994 /* return the context address */ 995 *pcontext = pwrb_context; 996 return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn); 997 } 998 999 static inline void 1000 beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context, 1001 struct wrb_handle *pwrb_handle, 1002 unsigned int wrbs_per_cxn) 1003 { 1004 spin_lock_bh(&pwrb_context->wrb_lock); 1005 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; 1006 pwrb_context->wrb_handles_available++; 1007 if (pwrb_context->free_index == (wrbs_per_cxn - 1)) 1008 pwrb_context->free_index = 0; 1009 else 1010 pwrb_context->free_index++; 1011 spin_unlock_bh(&pwrb_context->wrb_lock); 1012 } 1013 1014 /** 1015 * free_wrb_handle - To free the wrb handle back to pool 1016 * @phba: The hba pointer 1017 * @pwrb_context: The context to free from 1018 * @pwrb_handle: The wrb_handle to free 1019 * 1020 * This happens under session_lock until submission to chip 1021 */ 1022 static void 1023 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, 1024 struct wrb_handle *pwrb_handle) 1025 { 1026 beiscsi_put_wrb_handle(pwrb_context, 1027 pwrb_handle, 1028 phba->params.wrbs_per_cxn); 1029 beiscsi_log(phba, KERN_INFO, 1030 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1031 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x" 1032 "wrb_handles_available=%d\n", 1033 pwrb_handle, pwrb_context->free_index, 1034 pwrb_context->wrb_handles_available); 1035 } 1036 1037 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 1038 { 1039 struct sgl_handle *psgl_handle; 1040 1041 spin_lock_bh(&phba->mgmt_sgl_lock); 1042 if (phba->eh_sgl_hndl_avbl) { 1043 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 1044 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; 1045 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1046 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n", 1047 phba->eh_sgl_alloc_index, 1048 phba->eh_sgl_alloc_index); 1049 1050 phba->eh_sgl_hndl_avbl--; 1051 if (phba->eh_sgl_alloc_index == 1052 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1053 1)) 1054 phba->eh_sgl_alloc_index = 0; 1055 else 1056 phba->eh_sgl_alloc_index++; 1057 } else 1058 psgl_handle = NULL; 1059 spin_unlock_bh(&phba->mgmt_sgl_lock); 1060 return psgl_handle; 1061 } 1062 1063 void 1064 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1065 { 1066 spin_lock_bh(&phba->mgmt_sgl_lock); 1067 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1068 "BM_%d : In free_mgmt_sgl_handle," 1069 "eh_sgl_free_index=%d\n", 1070 phba->eh_sgl_free_index); 1071 1072 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 1073 /* 1074 * this can happen if clean_task is called on a task that 1075 * failed in xmit_task or alloc_pdu. 1076 */ 1077 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1078 "BM_%d : Double Free in eh SGL ," 1079 "eh_sgl_free_index=%d\n", 1080 phba->eh_sgl_free_index); 1081 spin_unlock_bh(&phba->mgmt_sgl_lock); 1082 return; 1083 } 1084 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; 1085 phba->eh_sgl_hndl_avbl++; 1086 if (phba->eh_sgl_free_index == 1087 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1)) 1088 phba->eh_sgl_free_index = 0; 1089 else 1090 phba->eh_sgl_free_index++; 1091 spin_unlock_bh(&phba->mgmt_sgl_lock); 1092 } 1093 1094 static void 1095 be_complete_io(struct beiscsi_conn *beiscsi_conn, 1096 struct iscsi_task *task, 1097 struct common_sol_cqe *csol_cqe) 1098 { 1099 struct beiscsi_io_task *io_task = task->dd_data; 1100 struct be_status_bhs *sts_bhs = 1101 (struct be_status_bhs *)io_task->cmd_bhs; 1102 struct iscsi_conn *conn = beiscsi_conn->conn; 1103 unsigned char *sense; 1104 u32 resid = 0, exp_cmdsn, max_cmdsn; 1105 u8 rsp, status, flags; 1106 1107 exp_cmdsn = csol_cqe->exp_cmdsn; 1108 max_cmdsn = (csol_cqe->exp_cmdsn + 1109 csol_cqe->cmd_wnd - 1); 1110 rsp = csol_cqe->i_resp; 1111 status = csol_cqe->i_sts; 1112 flags = csol_cqe->i_flags; 1113 resid = csol_cqe->res_cnt; 1114 1115 if (!task->sc) { 1116 if (io_task->scsi_cmnd) { 1117 scsi_dma_unmap(io_task->scsi_cmnd); 1118 io_task->scsi_cmnd = NULL; 1119 } 1120 1121 return; 1122 } 1123 task->sc->result = (DID_OK << 16) | status; 1124 if (rsp != ISCSI_STATUS_CMD_COMPLETED) { 1125 task->sc->result = DID_ERROR << 16; 1126 goto unmap; 1127 } 1128 1129 /* bidi not initially supported */ 1130 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) { 1131 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW)) 1132 task->sc->result = DID_ERROR << 16; 1133 1134 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) { 1135 scsi_set_resid(task->sc, resid); 1136 if (!status && (scsi_bufflen(task->sc) - resid < 1137 task->sc->underflow)) 1138 task->sc->result = DID_ERROR << 16; 1139 } 1140 } 1141 1142 if (status == SAM_STAT_CHECK_CONDITION) { 1143 u16 sense_len; 1144 unsigned short *slen = (unsigned short *)sts_bhs->sense_info; 1145 1146 sense = sts_bhs->sense_info + sizeof(unsigned short); 1147 sense_len = be16_to_cpu(*slen); 1148 memcpy(task->sc->sense_buffer, sense, 1149 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); 1150 } 1151 1152 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) 1153 conn->rxdata_octets += resid; 1154 unmap: 1155 if (io_task->scsi_cmnd) { 1156 scsi_dma_unmap(io_task->scsi_cmnd); 1157 io_task->scsi_cmnd = NULL; 1158 } 1159 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); 1160 } 1161 1162 static void 1163 be_complete_logout(struct beiscsi_conn *beiscsi_conn, 1164 struct iscsi_task *task, 1165 struct common_sol_cqe *csol_cqe) 1166 { 1167 struct iscsi_logout_rsp *hdr; 1168 struct beiscsi_io_task *io_task = task->dd_data; 1169 struct iscsi_conn *conn = beiscsi_conn->conn; 1170 1171 hdr = (struct iscsi_logout_rsp *)task->hdr; 1172 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 1173 hdr->t2wait = 5; 1174 hdr->t2retain = 0; 1175 hdr->flags = csol_cqe->i_flags; 1176 hdr->response = csol_cqe->i_resp; 1177 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1178 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1179 csol_cqe->cmd_wnd - 1); 1180 1181 hdr->dlength[0] = 0; 1182 hdr->dlength[1] = 0; 1183 hdr->dlength[2] = 0; 1184 hdr->hlength = 0; 1185 hdr->itt = io_task->libiscsi_itt; 1186 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1187 } 1188 1189 static void 1190 be_complete_tmf(struct beiscsi_conn *beiscsi_conn, 1191 struct iscsi_task *task, 1192 struct common_sol_cqe *csol_cqe) 1193 { 1194 struct iscsi_tm_rsp *hdr; 1195 struct iscsi_conn *conn = beiscsi_conn->conn; 1196 struct beiscsi_io_task *io_task = task->dd_data; 1197 1198 hdr = (struct iscsi_tm_rsp *)task->hdr; 1199 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 1200 hdr->flags = csol_cqe->i_flags; 1201 hdr->response = csol_cqe->i_resp; 1202 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1203 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1204 csol_cqe->cmd_wnd - 1); 1205 1206 hdr->itt = io_task->libiscsi_itt; 1207 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1208 } 1209 1210 static void 1211 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, 1212 struct beiscsi_hba *phba, struct sol_cqe *psol) 1213 { 1214 struct hwi_wrb_context *pwrb_context; 1215 uint16_t wrb_index, cid, cri_index; 1216 struct hwi_controller *phwi_ctrlr; 1217 struct wrb_handle *pwrb_handle; 1218 struct iscsi_task *task; 1219 1220 phwi_ctrlr = phba->phwi_ctrlr; 1221 if (is_chip_be2_be3r(phba)) { 1222 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1223 wrb_idx, psol); 1224 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1225 cid, psol); 1226 } else { 1227 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1228 wrb_idx, psol); 1229 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1230 cid, psol); 1231 } 1232 1233 cri_index = BE_GET_CRI_FROM_CID(cid); 1234 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1235 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1236 task = pwrb_handle->pio_handle; 1237 iscsi_put_task(task); 1238 } 1239 1240 static void 1241 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, 1242 struct iscsi_task *task, 1243 struct common_sol_cqe *csol_cqe) 1244 { 1245 struct iscsi_nopin *hdr; 1246 struct iscsi_conn *conn = beiscsi_conn->conn; 1247 struct beiscsi_io_task *io_task = task->dd_data; 1248 1249 hdr = (struct iscsi_nopin *)task->hdr; 1250 hdr->flags = csol_cqe->i_flags; 1251 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1252 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1253 csol_cqe->cmd_wnd - 1); 1254 1255 hdr->opcode = ISCSI_OP_NOOP_IN; 1256 hdr->itt = io_task->libiscsi_itt; 1257 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1258 } 1259 1260 static void adapter_get_sol_cqe(struct beiscsi_hba *phba, 1261 struct sol_cqe *psol, 1262 struct common_sol_cqe *csol_cqe) 1263 { 1264 if (is_chip_be2_be3r(phba)) { 1265 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, 1266 i_exp_cmd_sn, psol); 1267 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, 1268 i_res_cnt, psol); 1269 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, 1270 i_cmd_wnd, psol); 1271 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, 1272 wrb_index, psol); 1273 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, 1274 cid, psol); 1275 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1276 hw_sts, psol); 1277 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, 1278 i_resp, psol); 1279 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1280 i_sts, psol); 1281 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, 1282 i_flags, psol); 1283 } else { 1284 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1285 i_exp_cmd_sn, psol); 1286 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1287 i_res_cnt, psol); 1288 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1289 wrb_index, psol); 1290 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1291 cid, psol); 1292 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1293 hw_sts, psol); 1294 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1295 i_cmd_wnd, psol); 1296 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1297 cmd_cmpl, psol)) 1298 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1299 i_sts, psol); 1300 else 1301 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1302 i_sts, psol); 1303 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1304 u, psol)) 1305 csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW; 1306 1307 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1308 o, psol)) 1309 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; 1310 } 1311 } 1312 1313 1314 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, 1315 struct beiscsi_hba *phba, struct sol_cqe *psol) 1316 { 1317 struct hwi_wrb_context *pwrb_context; 1318 struct wrb_handle *pwrb_handle; 1319 struct iscsi_wrb *pwrb = NULL; 1320 struct hwi_controller *phwi_ctrlr; 1321 struct iscsi_task *task; 1322 unsigned int type; 1323 struct iscsi_conn *conn = beiscsi_conn->conn; 1324 struct iscsi_session *session = conn->session; 1325 struct common_sol_cqe csol_cqe = {0}; 1326 uint16_t cri_index = 0; 1327 1328 phwi_ctrlr = phba->phwi_ctrlr; 1329 1330 /* Copy the elements to a common structure */ 1331 adapter_get_sol_cqe(phba, psol, &csol_cqe); 1332 1333 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); 1334 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1335 1336 pwrb_handle = pwrb_context->pwrb_handle_basestd[ 1337 csol_cqe.wrb_index]; 1338 1339 task = pwrb_handle->pio_handle; 1340 pwrb = pwrb_handle->pwrb; 1341 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; 1342 1343 spin_lock_bh(&session->back_lock); 1344 switch (type) { 1345 case HWH_TYPE_IO: 1346 case HWH_TYPE_IO_RD: 1347 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 1348 ISCSI_OP_NOOP_OUT) 1349 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1350 else 1351 be_complete_io(beiscsi_conn, task, &csol_cqe); 1352 break; 1353 1354 case HWH_TYPE_LOGOUT: 1355 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 1356 be_complete_logout(beiscsi_conn, task, &csol_cqe); 1357 else 1358 be_complete_tmf(beiscsi_conn, task, &csol_cqe); 1359 break; 1360 1361 case HWH_TYPE_LOGIN: 1362 beiscsi_log(phba, KERN_ERR, 1363 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1364 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in" 1365 " hwi_complete_cmd- Solicited path\n"); 1366 break; 1367 1368 case HWH_TYPE_NOP: 1369 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1370 break; 1371 1372 default: 1373 beiscsi_log(phba, KERN_WARNING, 1374 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1375 "BM_%d : In hwi_complete_cmd, unknown type = %d" 1376 "wrb_index 0x%x CID 0x%x\n", type, 1377 csol_cqe.wrb_index, 1378 csol_cqe.cid); 1379 break; 1380 } 1381 1382 spin_unlock_bh(&session->back_lock); 1383 } 1384 1385 /** 1386 * ASYNC PDUs include 1387 * a. Unsolicited NOP-In (target initiated NOP-In) 1388 * b. ASYNC Messages 1389 * c. Reject PDU 1390 * d. Login response 1391 * These headers arrive unprocessed by the EP firmware. 1392 * iSCSI layer processes them. 1393 */ 1394 static unsigned int 1395 beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn, 1396 struct pdu_base *phdr, void *pdata, unsigned int dlen) 1397 { 1398 struct beiscsi_hba *phba = beiscsi_conn->phba; 1399 struct iscsi_conn *conn = beiscsi_conn->conn; 1400 struct beiscsi_io_task *io_task; 1401 struct iscsi_hdr *login_hdr; 1402 struct iscsi_task *task; 1403 u8 code; 1404 1405 code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr); 1406 switch (code) { 1407 case ISCSI_OP_NOOP_IN: 1408 pdata = NULL; 1409 dlen = 0; 1410 break; 1411 case ISCSI_OP_ASYNC_EVENT: 1412 break; 1413 case ISCSI_OP_REJECT: 1414 WARN_ON(!pdata); 1415 WARN_ON(!(dlen == 48)); 1416 beiscsi_log(phba, KERN_ERR, 1417 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1418 "BM_%d : In ISCSI_OP_REJECT\n"); 1419 break; 1420 case ISCSI_OP_LOGIN_RSP: 1421 case ISCSI_OP_TEXT_RSP: 1422 task = conn->login_task; 1423 io_task = task->dd_data; 1424 login_hdr = (struct iscsi_hdr *)phdr; 1425 login_hdr->itt = io_task->libiscsi_itt; 1426 break; 1427 default: 1428 beiscsi_log(phba, KERN_WARNING, 1429 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1430 "BM_%d : unrecognized async PDU opcode 0x%x\n", 1431 code); 1432 return 1; 1433 } 1434 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen); 1435 return 0; 1436 } 1437 1438 static inline void 1439 beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx, 1440 struct hd_async_handle *pasync_handle) 1441 { 1442 if (pasync_handle->is_header) { 1443 list_add_tail(&pasync_handle->link, 1444 &pasync_ctx->async_header.free_list); 1445 pasync_ctx->async_header.free_entries++; 1446 } else { 1447 list_add_tail(&pasync_handle->link, 1448 &pasync_ctx->async_data.free_list); 1449 pasync_ctx->async_data.free_entries++; 1450 } 1451 } 1452 1453 static struct hd_async_handle * 1454 beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn, 1455 struct hd_async_context *pasync_ctx, 1456 struct i_t_dpdu_cqe *pdpdu_cqe) 1457 { 1458 struct beiscsi_hba *phba = beiscsi_conn->phba; 1459 struct hd_async_handle *pasync_handle; 1460 struct be_bus_address phys_addr; 1461 u8 final, error = 0; 1462 u16 cid, code, ci; 1463 u32 dpl; 1464 1465 cid = beiscsi_conn->beiscsi_conn_cid; 1466 /** 1467 * This function is invoked to get the right async_handle structure 1468 * from a given DEF PDU CQ entry. 1469 * 1470 * - index in CQ entry gives the vertical index 1471 * - address in CQ entry is the offset where the DMA last ended 1472 * - final - no more notifications for this PDU 1473 */ 1474 if (is_chip_be2_be3r(phba)) { 1475 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1476 dpl, pdpdu_cqe); 1477 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1478 index, pdpdu_cqe); 1479 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1480 final, pdpdu_cqe); 1481 } else { 1482 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1483 dpl, pdpdu_cqe); 1484 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1485 index, pdpdu_cqe); 1486 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1487 final, pdpdu_cqe); 1488 } 1489 1490 /** 1491 * DB addr Hi/Lo is same for BE and SKH. 1492 * Subtract the dataplacementlength to get to the base. 1493 */ 1494 phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1495 db_addr_lo, pdpdu_cqe); 1496 phys_addr.u.a32.address_lo -= dpl; 1497 phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1498 db_addr_hi, pdpdu_cqe); 1499 1500 code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe); 1501 switch (code) { 1502 case UNSOL_HDR_NOTIFY: 1503 pasync_handle = pasync_ctx->async_entry[ci].header; 1504 break; 1505 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1506 error = 1; 1507 case UNSOL_DATA_NOTIFY: 1508 pasync_handle = pasync_ctx->async_entry[ci].data; 1509 break; 1510 /* called only for above codes */ 1511 default: 1512 pasync_handle = NULL; 1513 break; 1514 } 1515 1516 if (!pasync_handle) { 1517 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1518 "BM_%d : cid %d async PDU handle not found - code %d ci %d addr %llx\n", 1519 cid, code, ci, phys_addr.u.a64.address); 1520 return pasync_handle; 1521 } 1522 1523 if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address || 1524 pasync_handle->index != ci) { 1525 /* driver bug - if ci does not match async handle index */ 1526 error = 1; 1527 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1528 "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n", 1529 cid, pasync_handle->is_header ? 'H' : 'D', 1530 pasync_handle->pa.u.a64.address, 1531 pasync_handle->index, 1532 phys_addr.u.a64.address, ci); 1533 /* FW has stale address - attempt continuing by dropping */ 1534 } 1535 1536 /** 1537 * Each CID is associated with unique CRI. 1538 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different. 1539 **/ 1540 pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(cid); 1541 pasync_handle->is_final = final; 1542 pasync_handle->buffer_len = dpl; 1543 /* empty the slot */ 1544 if (pasync_handle->is_header) 1545 pasync_ctx->async_entry[ci].header = NULL; 1546 else 1547 pasync_ctx->async_entry[ci].data = NULL; 1548 1549 /** 1550 * DEF PDU header and data buffers with errors should be simply 1551 * dropped as there are no consumers for it. 1552 */ 1553 if (error) { 1554 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); 1555 pasync_handle = NULL; 1556 } 1557 return pasync_handle; 1558 } 1559 1560 static void 1561 beiscsi_hdl_purge_handles(struct beiscsi_hba *phba, 1562 struct hd_async_context *pasync_ctx, 1563 u16 cri) 1564 { 1565 struct hd_async_handle *pasync_handle, *tmp_handle; 1566 struct list_head *plist; 1567 1568 plist = &pasync_ctx->async_entry[cri].wq.list; 1569 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { 1570 list_del(&pasync_handle->link); 1571 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); 1572 } 1573 1574 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list); 1575 pasync_ctx->async_entry[cri].wq.hdr_len = 0; 1576 pasync_ctx->async_entry[cri].wq.bytes_received = 0; 1577 pasync_ctx->async_entry[cri].wq.bytes_needed = 0; 1578 } 1579 1580 static unsigned int 1581 beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn, 1582 struct hd_async_context *pasync_ctx, 1583 u16 cri) 1584 { 1585 struct iscsi_session *session = beiscsi_conn->conn->session; 1586 struct hd_async_handle *pasync_handle, *plast_handle; 1587 struct beiscsi_hba *phba = beiscsi_conn->phba; 1588 void *phdr = NULL, *pdata = NULL; 1589 u32 dlen = 0, status = 0; 1590 struct list_head *plist; 1591 1592 plist = &pasync_ctx->async_entry[cri].wq.list; 1593 plast_handle = NULL; 1594 list_for_each_entry(pasync_handle, plist, link) { 1595 plast_handle = pasync_handle; 1596 /* get the header, the first entry */ 1597 if (!phdr) { 1598 phdr = pasync_handle->pbuffer; 1599 continue; 1600 } 1601 /* use first buffer to collect all the data */ 1602 if (!pdata) { 1603 pdata = pasync_handle->pbuffer; 1604 dlen = pasync_handle->buffer_len; 1605 continue; 1606 } 1607 memcpy(pdata + dlen, pasync_handle->pbuffer, 1608 pasync_handle->buffer_len); 1609 dlen += pasync_handle->buffer_len; 1610 } 1611 1612 if (!plast_handle->is_final) { 1613 /* last handle should have final PDU notification from FW */ 1614 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1615 "BM_%d : cid %u %p fwd async PDU with last handle missing - HL%u:DN%u:DR%u\n", 1616 beiscsi_conn->beiscsi_conn_cid, plast_handle, 1617 pasync_ctx->async_entry[cri].wq.hdr_len, 1618 pasync_ctx->async_entry[cri].wq.bytes_needed, 1619 pasync_ctx->async_entry[cri].wq.bytes_received); 1620 } 1621 spin_lock_bh(&session->back_lock); 1622 status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen); 1623 spin_unlock_bh(&session->back_lock); 1624 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); 1625 return status; 1626 } 1627 1628 static unsigned int 1629 beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn, 1630 struct hd_async_context *pasync_ctx, 1631 struct hd_async_handle *pasync_handle) 1632 { 1633 unsigned int bytes_needed = 0, status = 0; 1634 u16 cri = pasync_handle->cri; 1635 struct cri_wait_queue *wq; 1636 struct beiscsi_hba *phba; 1637 struct pdu_base *ppdu; 1638 char *err = ""; 1639 1640 phba = beiscsi_conn->phba; 1641 wq = &pasync_ctx->async_entry[cri].wq; 1642 if (pasync_handle->is_header) { 1643 /* check if PDU hdr is rcv'd when old hdr not completed */ 1644 if (wq->hdr_len) { 1645 err = "incomplete"; 1646 goto drop_pdu; 1647 } 1648 ppdu = pasync_handle->pbuffer; 1649 bytes_needed = AMAP_GET_BITS(struct amap_pdu_base, 1650 data_len_hi, ppdu); 1651 bytes_needed <<= 16; 1652 bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base, 1653 data_len_lo, ppdu)); 1654 wq->hdr_len = pasync_handle->buffer_len; 1655 wq->bytes_received = 0; 1656 wq->bytes_needed = bytes_needed; 1657 list_add_tail(&pasync_handle->link, &wq->list); 1658 if (!bytes_needed) 1659 status = beiscsi_hdl_fwd_pdu(beiscsi_conn, 1660 pasync_ctx, cri); 1661 } else { 1662 /* check if data received has header and is needed */ 1663 if (!wq->hdr_len || !wq->bytes_needed) { 1664 err = "header less"; 1665 goto drop_pdu; 1666 } 1667 wq->bytes_received += pasync_handle->buffer_len; 1668 /* Something got overwritten? Better catch it here. */ 1669 if (wq->bytes_received > wq->bytes_needed) { 1670 err = "overflow"; 1671 goto drop_pdu; 1672 } 1673 list_add_tail(&pasync_handle->link, &wq->list); 1674 if (wq->bytes_received == wq->bytes_needed) 1675 status = beiscsi_hdl_fwd_pdu(beiscsi_conn, 1676 pasync_ctx, cri); 1677 } 1678 return status; 1679 1680 drop_pdu: 1681 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1682 "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n", 1683 beiscsi_conn->beiscsi_conn_cid, err, 1684 pasync_handle->is_header ? 'H' : 'D', 1685 wq->hdr_len, wq->bytes_needed, 1686 pasync_handle->buffer_len); 1687 /* discard this handle */ 1688 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); 1689 /* free all the other handles in cri_wait_queue */ 1690 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); 1691 /* try continuing */ 1692 return status; 1693 } 1694 1695 static void 1696 beiscsi_hdq_post_handles(struct beiscsi_hba *phba, 1697 u8 header, u8 ulp_num) 1698 { 1699 struct hd_async_handle *pasync_handle, *tmp, **slot; 1700 struct hd_async_context *pasync_ctx; 1701 struct hwi_controller *phwi_ctrlr; 1702 struct list_head *hfree_list; 1703 struct phys_addr *pasync_sge; 1704 u32 ring_id, doorbell = 0; 1705 u16 index, num_entries; 1706 u32 doorbell_offset; 1707 u16 prod = 0, cons; 1708 1709 phwi_ctrlr = phba->phwi_ctrlr; 1710 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1711 num_entries = pasync_ctx->num_entries; 1712 if (header) { 1713 cons = pasync_ctx->async_header.free_entries; 1714 hfree_list = &pasync_ctx->async_header.free_list; 1715 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; 1716 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. 1717 doorbell_offset; 1718 } else { 1719 cons = pasync_ctx->async_data.free_entries; 1720 hfree_list = &pasync_ctx->async_data.free_list; 1721 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; 1722 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. 1723 doorbell_offset; 1724 } 1725 /* number of entries posted must be in multiples of 8 */ 1726 if (cons % 8) 1727 return; 1728 1729 list_for_each_entry_safe(pasync_handle, tmp, hfree_list, link) { 1730 list_del_init(&pasync_handle->link); 1731 pasync_handle->is_final = 0; 1732 pasync_handle->buffer_len = 0; 1733 1734 /* handles can be consumed out of order, use index in handle */ 1735 index = pasync_handle->index; 1736 WARN_ON(pasync_handle->is_header != header); 1737 if (header) 1738 slot = &pasync_ctx->async_entry[index].header; 1739 else 1740 slot = &pasync_ctx->async_entry[index].data; 1741 /** 1742 * The slot just tracks handle's hold and release, so 1743 * overwriting at the same index won't do any harm but 1744 * needs to be caught. 1745 */ 1746 if (*slot != NULL) { 1747 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1748 "BM_%d : async PDU %s slot at %u not empty\n", 1749 header ? "header" : "data", index); 1750 } 1751 /** 1752 * We use same freed index as in completion to post so this 1753 * operation is not required for refills. Its required only 1754 * for ring creation. 1755 */ 1756 if (header) 1757 pasync_sge = pasync_ctx->async_header.ring_base; 1758 else 1759 pasync_sge = pasync_ctx->async_data.ring_base; 1760 pasync_sge += index; 1761 /* if its a refill then address is same; hi is lo */ 1762 WARN_ON(pasync_sge->hi && 1763 pasync_sge->hi != pasync_handle->pa.u.a32.address_lo); 1764 WARN_ON(pasync_sge->lo && 1765 pasync_sge->lo != pasync_handle->pa.u.a32.address_hi); 1766 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo; 1767 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi; 1768 1769 *slot = pasync_handle; 1770 if (++prod == cons) 1771 break; 1772 } 1773 if (header) 1774 pasync_ctx->async_header.free_entries -= prod; 1775 else 1776 pasync_ctx->async_data.free_entries -= prod; 1777 1778 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK; 1779 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT; 1780 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT; 1781 doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT; 1782 iowrite32(doorbell, phba->db_va + doorbell_offset); 1783 } 1784 1785 static void 1786 beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn, 1787 struct i_t_dpdu_cqe *pdpdu_cqe) 1788 { 1789 struct beiscsi_hba *phba = beiscsi_conn->phba; 1790 struct hd_async_handle *pasync_handle = NULL; 1791 struct hd_async_context *pasync_ctx; 1792 struct hwi_controller *phwi_ctrlr; 1793 u16 cid_cri; 1794 u8 ulp_num; 1795 1796 phwi_ctrlr = phba->phwi_ctrlr; 1797 cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); 1798 ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri); 1799 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1800 pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx, 1801 pdpdu_cqe); 1802 if (!pasync_handle) 1803 return; 1804 1805 beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle); 1806 beiscsi_hdq_post_handles(phba, pasync_handle->is_header, ulp_num); 1807 } 1808 1809 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba) 1810 { 1811 struct be_queue_info *mcc_cq; 1812 struct be_mcc_compl *mcc_compl; 1813 unsigned int num_processed = 0; 1814 1815 mcc_cq = &phba->ctrl.mcc_obj.cq; 1816 mcc_compl = queue_tail_node(mcc_cq); 1817 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1818 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { 1819 if (beiscsi_hba_in_error(phba)) 1820 return; 1821 1822 if (num_processed >= 32) { 1823 hwi_ring_cq_db(phba, mcc_cq->id, 1824 num_processed, 0); 1825 num_processed = 0; 1826 } 1827 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { 1828 beiscsi_process_async_event(phba, mcc_compl); 1829 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 1830 beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl); 1831 } 1832 1833 mcc_compl->flags = 0; 1834 queue_tail_inc(mcc_cq); 1835 mcc_compl = queue_tail_node(mcc_cq); 1836 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1837 num_processed++; 1838 } 1839 1840 if (num_processed > 0) 1841 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1); 1842 } 1843 1844 static void beiscsi_mcc_work(struct work_struct *work) 1845 { 1846 struct be_eq_obj *pbe_eq; 1847 struct beiscsi_hba *phba; 1848 1849 pbe_eq = container_of(work, struct be_eq_obj, mcc_work); 1850 phba = pbe_eq->phba; 1851 beiscsi_process_mcc_cq(phba); 1852 /* rearm EQ for further interrupts */ 1853 if (!beiscsi_hba_in_error(phba)) 1854 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 1855 } 1856 1857 /** 1858 * beiscsi_process_cq()- Process the Completion Queue 1859 * @pbe_eq: Event Q on which the Completion has come 1860 * @budget: Max number of events to processed 1861 * 1862 * return 1863 * Number of Completion Entries processed. 1864 **/ 1865 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget) 1866 { 1867 struct be_queue_info *cq; 1868 struct sol_cqe *sol; 1869 struct dmsg_cqe *dmsg; 1870 unsigned int total = 0; 1871 unsigned int num_processed = 0; 1872 unsigned short code = 0, cid = 0; 1873 uint16_t cri_index = 0; 1874 struct beiscsi_conn *beiscsi_conn; 1875 struct beiscsi_endpoint *beiscsi_ep; 1876 struct iscsi_endpoint *ep; 1877 struct beiscsi_hba *phba; 1878 1879 cq = pbe_eq->cq; 1880 sol = queue_tail_node(cq); 1881 phba = pbe_eq->phba; 1882 1883 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 1884 CQE_VALID_MASK) { 1885 if (beiscsi_hba_in_error(phba)) 1886 return 0; 1887 1888 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1889 1890 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 1891 32] & CQE_CODE_MASK); 1892 1893 /* Get the CID */ 1894 if (is_chip_be2_be3r(phba)) { 1895 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); 1896 } else { 1897 if ((code == DRIVERMSG_NOTIFY) || 1898 (code == UNSOL_HDR_NOTIFY) || 1899 (code == UNSOL_DATA_NOTIFY)) 1900 cid = AMAP_GET_BITS( 1901 struct amap_i_t_dpdu_cqe_v2, 1902 cid, sol); 1903 else 1904 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1905 cid, sol); 1906 } 1907 1908 cri_index = BE_GET_CRI_FROM_CID(cid); 1909 ep = phba->ep_array[cri_index]; 1910 1911 if (ep == NULL) { 1912 /* connection has already been freed 1913 * just move on to next one 1914 */ 1915 beiscsi_log(phba, KERN_WARNING, 1916 BEISCSI_LOG_INIT, 1917 "BM_%d : proc cqe of disconn ep: cid %d\n", 1918 cid); 1919 goto proc_next_cqe; 1920 } 1921 1922 beiscsi_ep = ep->dd_data; 1923 beiscsi_conn = beiscsi_ep->conn; 1924 1925 /* replenish cq */ 1926 if (num_processed == 32) { 1927 hwi_ring_cq_db(phba, cq->id, 32, 0); 1928 num_processed = 0; 1929 } 1930 total++; 1931 1932 switch (code) { 1933 case SOL_CMD_COMPLETE: 1934 hwi_complete_cmd(beiscsi_conn, phba, sol); 1935 break; 1936 case DRIVERMSG_NOTIFY: 1937 beiscsi_log(phba, KERN_INFO, 1938 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1939 "BM_%d : Received %s[%d] on CID : %d\n", 1940 cqe_desc[code], code, cid); 1941 1942 dmsg = (struct dmsg_cqe *)sol; 1943 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 1944 break; 1945 case UNSOL_HDR_NOTIFY: 1946 beiscsi_log(phba, KERN_INFO, 1947 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1948 "BM_%d : Received %s[%d] on CID : %d\n", 1949 cqe_desc[code], code, cid); 1950 1951 spin_lock_bh(&phba->async_pdu_lock); 1952 beiscsi_hdq_process_compl(beiscsi_conn, 1953 (struct i_t_dpdu_cqe *)sol); 1954 spin_unlock_bh(&phba->async_pdu_lock); 1955 break; 1956 case UNSOL_DATA_NOTIFY: 1957 beiscsi_log(phba, KERN_INFO, 1958 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1959 "BM_%d : Received %s[%d] on CID : %d\n", 1960 cqe_desc[code], code, cid); 1961 1962 spin_lock_bh(&phba->async_pdu_lock); 1963 beiscsi_hdq_process_compl(beiscsi_conn, 1964 (struct i_t_dpdu_cqe *)sol); 1965 spin_unlock_bh(&phba->async_pdu_lock); 1966 break; 1967 case CXN_INVALIDATE_INDEX_NOTIFY: 1968 case CMD_INVALIDATED_NOTIFY: 1969 case CXN_INVALIDATE_NOTIFY: 1970 beiscsi_log(phba, KERN_ERR, 1971 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1972 "BM_%d : Ignoring %s[%d] on CID : %d\n", 1973 cqe_desc[code], code, cid); 1974 break; 1975 case CXN_KILLED_HDR_DIGEST_ERR: 1976 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 1977 beiscsi_log(phba, KERN_ERR, 1978 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1979 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1980 cqe_desc[code], code, cid); 1981 break; 1982 case CMD_KILLED_INVALID_STATSN_RCVD: 1983 case CMD_KILLED_INVALID_R2T_RCVD: 1984 case CMD_CXN_KILLED_LUN_INVALID: 1985 case CMD_CXN_KILLED_ICD_INVALID: 1986 case CMD_CXN_KILLED_ITT_INVALID: 1987 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 1988 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 1989 beiscsi_log(phba, KERN_ERR, 1990 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1991 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1992 cqe_desc[code], code, cid); 1993 break; 1994 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1995 beiscsi_log(phba, KERN_ERR, 1996 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1997 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 1998 cqe_desc[code], code, cid); 1999 spin_lock_bh(&phba->async_pdu_lock); 2000 /* driver consumes the entry and drops the contents */ 2001 beiscsi_hdq_process_compl(beiscsi_conn, 2002 (struct i_t_dpdu_cqe *)sol); 2003 spin_unlock_bh(&phba->async_pdu_lock); 2004 break; 2005 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2006 case CXN_KILLED_BURST_LEN_MISMATCH: 2007 case CXN_KILLED_AHS_RCVD: 2008 case CXN_KILLED_UNKNOWN_HDR: 2009 case CXN_KILLED_STALE_ITT_TTT_RCVD: 2010 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 2011 case CXN_KILLED_TIMED_OUT: 2012 case CXN_KILLED_FIN_RCVD: 2013 case CXN_KILLED_RST_SENT: 2014 case CXN_KILLED_RST_RCVD: 2015 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2016 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2017 case CXN_KILLED_OVER_RUN_RESIDUAL: 2018 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2019 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2020 beiscsi_log(phba, KERN_ERR, 2021 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2022 "BM_%d : Event %s[%d] received on CID : %d\n", 2023 cqe_desc[code], code, cid); 2024 if (beiscsi_conn) 2025 iscsi_conn_failure(beiscsi_conn->conn, 2026 ISCSI_ERR_CONN_FAILED); 2027 break; 2028 default: 2029 beiscsi_log(phba, KERN_ERR, 2030 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2031 "BM_%d : Invalid CQE Event Received Code : %d" 2032 "CID 0x%x...\n", 2033 code, cid); 2034 break; 2035 } 2036 2037 proc_next_cqe: 2038 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2039 queue_tail_inc(cq); 2040 sol = queue_tail_node(cq); 2041 num_processed++; 2042 if (total == budget) 2043 break; 2044 } 2045 2046 hwi_ring_cq_db(phba, cq->id, num_processed, 1); 2047 return total; 2048 } 2049 2050 static int be_iopoll(struct irq_poll *iop, int budget) 2051 { 2052 unsigned int ret, io_events; 2053 struct beiscsi_hba *phba; 2054 struct be_eq_obj *pbe_eq; 2055 struct be_eq_entry *eqe = NULL; 2056 struct be_queue_info *eq; 2057 2058 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2059 phba = pbe_eq->phba; 2060 if (beiscsi_hba_in_error(phba)) { 2061 irq_poll_complete(iop); 2062 return 0; 2063 } 2064 2065 io_events = 0; 2066 eq = &pbe_eq->q; 2067 eqe = queue_tail_node(eq); 2068 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & 2069 EQE_VALID_MASK) { 2070 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 2071 queue_tail_inc(eq); 2072 eqe = queue_tail_node(eq); 2073 io_events++; 2074 } 2075 hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1); 2076 2077 ret = beiscsi_process_cq(pbe_eq, budget); 2078 pbe_eq->cq_count += ret; 2079 if (ret < budget) { 2080 irq_poll_complete(iop); 2081 beiscsi_log(phba, KERN_INFO, 2082 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2083 "BM_%d : rearm pbe_eq->q.id =%d ret %d\n", 2084 pbe_eq->q.id, ret); 2085 if (!beiscsi_hba_in_error(phba)) 2086 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2087 } 2088 return ret; 2089 } 2090 2091 static void 2092 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2093 unsigned int num_sg, struct beiscsi_io_task *io_task) 2094 { 2095 struct iscsi_sge *psgl; 2096 unsigned int sg_len, index; 2097 unsigned int sge_len = 0; 2098 unsigned long long addr; 2099 struct scatterlist *l_sg; 2100 unsigned int offset; 2101 2102 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2103 io_task->bhs_pa.u.a32.address_lo); 2104 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2105 io_task->bhs_pa.u.a32.address_hi); 2106 2107 l_sg = sg; 2108 for (index = 0; (index < num_sg) && (index < 2); index++, 2109 sg = sg_next(sg)) { 2110 if (index == 0) { 2111 sg_len = sg_dma_len(sg); 2112 addr = (u64) sg_dma_address(sg); 2113 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2114 sge0_addr_lo, pwrb, 2115 lower_32_bits(addr)); 2116 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2117 sge0_addr_hi, pwrb, 2118 upper_32_bits(addr)); 2119 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2120 sge0_len, pwrb, 2121 sg_len); 2122 sge_len = sg_len; 2123 } else { 2124 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2125 pwrb, sge_len); 2126 sg_len = sg_dma_len(sg); 2127 addr = (u64) sg_dma_address(sg); 2128 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2129 sge1_addr_lo, pwrb, 2130 lower_32_bits(addr)); 2131 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2132 sge1_addr_hi, pwrb, 2133 upper_32_bits(addr)); 2134 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2135 sge1_len, pwrb, 2136 sg_len); 2137 } 2138 } 2139 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2140 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2141 2142 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2143 2144 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2145 io_task->bhs_pa.u.a32.address_hi); 2146 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2147 io_task->bhs_pa.u.a32.address_lo); 2148 2149 if (num_sg == 1) { 2150 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2151 1); 2152 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2153 0); 2154 } else if (num_sg == 2) { 2155 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2156 0); 2157 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2158 1); 2159 } else { 2160 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2161 0); 2162 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2163 0); 2164 } 2165 2166 sg = l_sg; 2167 psgl++; 2168 psgl++; 2169 offset = 0; 2170 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2171 sg_len = sg_dma_len(sg); 2172 addr = (u64) sg_dma_address(sg); 2173 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2174 lower_32_bits(addr)); 2175 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2176 upper_32_bits(addr)); 2177 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2178 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2179 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2180 offset += sg_len; 2181 } 2182 psgl--; 2183 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2184 } 2185 2186 static void 2187 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2188 unsigned int num_sg, struct beiscsi_io_task *io_task) 2189 { 2190 struct iscsi_sge *psgl; 2191 unsigned int sg_len, index; 2192 unsigned int sge_len = 0; 2193 unsigned long long addr; 2194 struct scatterlist *l_sg; 2195 unsigned int offset; 2196 2197 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2198 io_task->bhs_pa.u.a32.address_lo); 2199 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2200 io_task->bhs_pa.u.a32.address_hi); 2201 2202 l_sg = sg; 2203 for (index = 0; (index < num_sg) && (index < 2); index++, 2204 sg = sg_next(sg)) { 2205 if (index == 0) { 2206 sg_len = sg_dma_len(sg); 2207 addr = (u64) sg_dma_address(sg); 2208 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2209 ((u32)(addr & 0xFFFFFFFF))); 2210 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2211 ((u32)(addr >> 32))); 2212 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2213 sg_len); 2214 sge_len = sg_len; 2215 } else { 2216 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2217 pwrb, sge_len); 2218 sg_len = sg_dma_len(sg); 2219 addr = (u64) sg_dma_address(sg); 2220 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2221 ((u32)(addr & 0xFFFFFFFF))); 2222 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2223 ((u32)(addr >> 32))); 2224 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2225 sg_len); 2226 } 2227 } 2228 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2229 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2230 2231 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2232 2233 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2234 io_task->bhs_pa.u.a32.address_hi); 2235 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2236 io_task->bhs_pa.u.a32.address_lo); 2237 2238 if (num_sg == 1) { 2239 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2240 1); 2241 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2242 0); 2243 } else if (num_sg == 2) { 2244 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2245 0); 2246 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2247 1); 2248 } else { 2249 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2250 0); 2251 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2252 0); 2253 } 2254 sg = l_sg; 2255 psgl++; 2256 psgl++; 2257 offset = 0; 2258 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2259 sg_len = sg_dma_len(sg); 2260 addr = (u64) sg_dma_address(sg); 2261 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2262 (addr & 0xFFFFFFFF)); 2263 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2264 (addr >> 32)); 2265 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2266 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2267 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2268 offset += sg_len; 2269 } 2270 psgl--; 2271 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2272 } 2273 2274 /** 2275 * hwi_write_buffer()- Populate the WRB with task info 2276 * @pwrb: ptr to the WRB entry 2277 * @task: iscsi task which is to be executed 2278 **/ 2279 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2280 { 2281 struct iscsi_sge *psgl; 2282 struct beiscsi_io_task *io_task = task->dd_data; 2283 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2284 struct beiscsi_hba *phba = beiscsi_conn->phba; 2285 uint8_t dsp_value = 0; 2286 2287 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2288 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2289 io_task->bhs_pa.u.a32.address_lo); 2290 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2291 io_task->bhs_pa.u.a32.address_hi); 2292 2293 if (task->data) { 2294 2295 /* Check for the data_count */ 2296 dsp_value = (task->data_count) ? 1 : 0; 2297 2298 if (is_chip_be2_be3r(phba)) 2299 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2300 pwrb, dsp_value); 2301 else 2302 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2303 pwrb, dsp_value); 2304 2305 /* Map addr only if there is data_count */ 2306 if (dsp_value) { 2307 io_task->mtask_addr = pci_map_single(phba->pcidev, 2308 task->data, 2309 task->data_count, 2310 PCI_DMA_TODEVICE); 2311 if (pci_dma_mapping_error(phba->pcidev, 2312 io_task->mtask_addr)) 2313 return -ENOMEM; 2314 io_task->mtask_data_count = task->data_count; 2315 } else 2316 io_task->mtask_addr = 0; 2317 2318 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2319 lower_32_bits(io_task->mtask_addr)); 2320 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2321 upper_32_bits(io_task->mtask_addr)); 2322 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2323 task->data_count); 2324 2325 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2326 } else { 2327 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2328 io_task->mtask_addr = 0; 2329 } 2330 2331 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2332 2333 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2334 2335 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2336 io_task->bhs_pa.u.a32.address_hi); 2337 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2338 io_task->bhs_pa.u.a32.address_lo); 2339 if (task->data) { 2340 psgl++; 2341 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2342 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2343 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2344 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2345 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2346 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2347 2348 psgl++; 2349 if (task->data) { 2350 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2351 lower_32_bits(io_task->mtask_addr)); 2352 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2353 upper_32_bits(io_task->mtask_addr)); 2354 } 2355 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2356 } 2357 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2358 return 0; 2359 } 2360 2361 /** 2362 * beiscsi_find_mem_req()- Find mem needed 2363 * @phba: ptr to HBA struct 2364 **/ 2365 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2366 { 2367 uint8_t mem_descr_index, ulp_num; 2368 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2369 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2370 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2371 2372 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2373 sizeof(struct sol_cqe)); 2374 2375 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2376 2377 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2378 BE_ISCSI_PDU_HEADER_SIZE; 2379 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2380 sizeof(struct hwi_context_memory); 2381 2382 2383 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2384 * (phba->params.wrbs_per_cxn) 2385 * phba->params.cxns_per_ctrl; 2386 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2387 (phba->params.wrbs_per_cxn); 2388 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2389 phba->params.cxns_per_ctrl); 2390 2391 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2392 phba->params.icds_per_ctrl; 2393 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2394 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2395 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2396 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2397 2398 num_async_pdu_buf_sgl_pages = 2399 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2400 phba, ulp_num) * 2401 sizeof(struct phys_addr)); 2402 2403 num_async_pdu_buf_pages = 2404 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2405 phba, ulp_num) * 2406 phba->params.defpdu_hdr_sz); 2407 2408 num_async_pdu_data_pages = 2409 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2410 phba, ulp_num) * 2411 phba->params.defpdu_data_sz); 2412 2413 num_async_pdu_data_sgl_pages = 2414 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2415 phba, ulp_num) * 2416 sizeof(struct phys_addr)); 2417 2418 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2419 (ulp_num * MEM_DESCR_OFFSET)); 2420 phba->mem_req[mem_descr_index] = 2421 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2422 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2423 2424 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2425 (ulp_num * MEM_DESCR_OFFSET)); 2426 phba->mem_req[mem_descr_index] = 2427 num_async_pdu_buf_pages * 2428 PAGE_SIZE; 2429 2430 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2431 (ulp_num * MEM_DESCR_OFFSET)); 2432 phba->mem_req[mem_descr_index] = 2433 num_async_pdu_data_pages * 2434 PAGE_SIZE; 2435 2436 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2437 (ulp_num * MEM_DESCR_OFFSET)); 2438 phba->mem_req[mem_descr_index] = 2439 num_async_pdu_buf_sgl_pages * 2440 PAGE_SIZE; 2441 2442 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2443 (ulp_num * MEM_DESCR_OFFSET)); 2444 phba->mem_req[mem_descr_index] = 2445 num_async_pdu_data_sgl_pages * 2446 PAGE_SIZE; 2447 2448 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2449 (ulp_num * MEM_DESCR_OFFSET)); 2450 phba->mem_req[mem_descr_index] = 2451 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2452 sizeof(struct hd_async_handle); 2453 2454 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2455 (ulp_num * MEM_DESCR_OFFSET)); 2456 phba->mem_req[mem_descr_index] = 2457 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2458 sizeof(struct hd_async_handle); 2459 2460 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2461 (ulp_num * MEM_DESCR_OFFSET)); 2462 phba->mem_req[mem_descr_index] = 2463 sizeof(struct hd_async_context) + 2464 (BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2465 sizeof(struct hd_async_entry)); 2466 } 2467 } 2468 } 2469 2470 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2471 { 2472 dma_addr_t bus_add; 2473 struct hwi_controller *phwi_ctrlr; 2474 struct be_mem_descriptor *mem_descr; 2475 struct mem_array *mem_arr, *mem_arr_orig; 2476 unsigned int i, j, alloc_size, curr_alloc_size; 2477 2478 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2479 if (!phba->phwi_ctrlr) 2480 return -ENOMEM; 2481 2482 /* Allocate memory for wrb_context */ 2483 phwi_ctrlr = phba->phwi_ctrlr; 2484 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * 2485 phba->params.cxns_per_ctrl, 2486 GFP_KERNEL); 2487 if (!phwi_ctrlr->wrb_context) { 2488 kfree(phba->phwi_ctrlr); 2489 return -ENOMEM; 2490 } 2491 2492 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2493 GFP_KERNEL); 2494 if (!phba->init_mem) { 2495 kfree(phwi_ctrlr->wrb_context); 2496 kfree(phba->phwi_ctrlr); 2497 return -ENOMEM; 2498 } 2499 2500 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT, 2501 GFP_KERNEL); 2502 if (!mem_arr_orig) { 2503 kfree(phba->init_mem); 2504 kfree(phwi_ctrlr->wrb_context); 2505 kfree(phba->phwi_ctrlr); 2506 return -ENOMEM; 2507 } 2508 2509 mem_descr = phba->init_mem; 2510 for (i = 0; i < SE_MEM_MAX; i++) { 2511 if (!phba->mem_req[i]) { 2512 mem_descr->mem_array = NULL; 2513 mem_descr++; 2514 continue; 2515 } 2516 2517 j = 0; 2518 mem_arr = mem_arr_orig; 2519 alloc_size = phba->mem_req[i]; 2520 memset(mem_arr, 0, sizeof(struct mem_array) * 2521 BEISCSI_MAX_FRAGS_INIT); 2522 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2523 do { 2524 mem_arr->virtual_address = pci_alloc_consistent( 2525 phba->pcidev, 2526 curr_alloc_size, 2527 &bus_add); 2528 if (!mem_arr->virtual_address) { 2529 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2530 goto free_mem; 2531 if (curr_alloc_size - 2532 rounddown_pow_of_two(curr_alloc_size)) 2533 curr_alloc_size = rounddown_pow_of_two 2534 (curr_alloc_size); 2535 else 2536 curr_alloc_size = curr_alloc_size / 2; 2537 } else { 2538 mem_arr->bus_address.u. 2539 a64.address = (__u64) bus_add; 2540 mem_arr->size = curr_alloc_size; 2541 alloc_size -= curr_alloc_size; 2542 curr_alloc_size = min(be_max_phys_size * 2543 1024, alloc_size); 2544 j++; 2545 mem_arr++; 2546 } 2547 } while (alloc_size); 2548 mem_descr->num_elements = j; 2549 mem_descr->size_in_bytes = phba->mem_req[i]; 2550 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j, 2551 GFP_KERNEL); 2552 if (!mem_descr->mem_array) 2553 goto free_mem; 2554 2555 memcpy(mem_descr->mem_array, mem_arr_orig, 2556 sizeof(struct mem_array) * j); 2557 mem_descr++; 2558 } 2559 kfree(mem_arr_orig); 2560 return 0; 2561 free_mem: 2562 mem_descr->num_elements = j; 2563 while ((i) || (j)) { 2564 for (j = mem_descr->num_elements; j > 0; j--) { 2565 pci_free_consistent(phba->pcidev, 2566 mem_descr->mem_array[j - 1].size, 2567 mem_descr->mem_array[j - 1]. 2568 virtual_address, 2569 (unsigned long)mem_descr-> 2570 mem_array[j - 1]. 2571 bus_address.u.a64.address); 2572 } 2573 if (i) { 2574 i--; 2575 kfree(mem_descr->mem_array); 2576 mem_descr--; 2577 } 2578 } 2579 kfree(mem_arr_orig); 2580 kfree(phba->init_mem); 2581 kfree(phba->phwi_ctrlr->wrb_context); 2582 kfree(phba->phwi_ctrlr); 2583 return -ENOMEM; 2584 } 2585 2586 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2587 { 2588 beiscsi_find_mem_req(phba); 2589 return beiscsi_alloc_mem(phba); 2590 } 2591 2592 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2593 { 2594 struct pdu_data_out *pdata_out; 2595 struct pdu_nop_out *pnop_out; 2596 struct be_mem_descriptor *mem_descr; 2597 2598 mem_descr = phba->init_mem; 2599 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2600 pdata_out = 2601 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2602 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2603 2604 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2605 IIOC_SCSI_DATA); 2606 2607 pnop_out = 2608 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 2609 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2610 2611 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2612 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2613 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2614 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2615 } 2616 2617 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2618 { 2619 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2620 struct hwi_context_memory *phwi_ctxt; 2621 struct wrb_handle *pwrb_handle = NULL; 2622 struct hwi_controller *phwi_ctrlr; 2623 struct hwi_wrb_context *pwrb_context; 2624 struct iscsi_wrb *pwrb = NULL; 2625 unsigned int num_cxn_wrbh = 0; 2626 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2627 2628 mem_descr_wrbh = phba->init_mem; 2629 mem_descr_wrbh += HWI_MEM_WRBH; 2630 2631 mem_descr_wrb = phba->init_mem; 2632 mem_descr_wrb += HWI_MEM_WRB; 2633 phwi_ctrlr = phba->phwi_ctrlr; 2634 2635 /* Allocate memory for WRBQ */ 2636 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2637 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * 2638 phba->params.cxns_per_ctrl, 2639 GFP_KERNEL); 2640 if (!phwi_ctxt->be_wrbq) { 2641 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2642 "BM_%d : WRBQ Mem Alloc Failed\n"); 2643 return -ENOMEM; 2644 } 2645 2646 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2647 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2648 pwrb_context->pwrb_handle_base = 2649 kzalloc(sizeof(struct wrb_handle *) * 2650 phba->params.wrbs_per_cxn, GFP_KERNEL); 2651 if (!pwrb_context->pwrb_handle_base) { 2652 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2653 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2654 goto init_wrb_hndl_failed; 2655 } 2656 pwrb_context->pwrb_handle_basestd = 2657 kzalloc(sizeof(struct wrb_handle *) * 2658 phba->params.wrbs_per_cxn, GFP_KERNEL); 2659 if (!pwrb_context->pwrb_handle_basestd) { 2660 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2661 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2662 goto init_wrb_hndl_failed; 2663 } 2664 if (!num_cxn_wrbh) { 2665 pwrb_handle = 2666 mem_descr_wrbh->mem_array[idx].virtual_address; 2667 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2668 ((sizeof(struct wrb_handle)) * 2669 phba->params.wrbs_per_cxn)); 2670 idx++; 2671 } 2672 pwrb_context->alloc_index = 0; 2673 pwrb_context->wrb_handles_available = 0; 2674 pwrb_context->free_index = 0; 2675 2676 if (num_cxn_wrbh) { 2677 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2678 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2679 pwrb_context->pwrb_handle_basestd[j] = 2680 pwrb_handle; 2681 pwrb_context->wrb_handles_available++; 2682 pwrb_handle->wrb_index = j; 2683 pwrb_handle++; 2684 } 2685 num_cxn_wrbh--; 2686 } 2687 spin_lock_init(&pwrb_context->wrb_lock); 2688 } 2689 idx = 0; 2690 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2691 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2692 if (!num_cxn_wrb) { 2693 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2694 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2695 ((sizeof(struct iscsi_wrb) * 2696 phba->params.wrbs_per_cxn)); 2697 idx++; 2698 } 2699 2700 if (num_cxn_wrb) { 2701 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2702 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2703 pwrb_handle->pwrb = pwrb; 2704 pwrb++; 2705 } 2706 num_cxn_wrb--; 2707 } 2708 } 2709 return 0; 2710 init_wrb_hndl_failed: 2711 for (j = index; j > 0; j--) { 2712 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2713 kfree(pwrb_context->pwrb_handle_base); 2714 kfree(pwrb_context->pwrb_handle_basestd); 2715 } 2716 return -ENOMEM; 2717 } 2718 2719 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2720 { 2721 uint8_t ulp_num; 2722 struct hwi_controller *phwi_ctrlr; 2723 struct hba_parameters *p = &phba->params; 2724 struct hd_async_context *pasync_ctx; 2725 struct hd_async_handle *pasync_header_h, *pasync_data_h; 2726 unsigned int index, idx, num_per_mem, num_async_data; 2727 struct be_mem_descriptor *mem_descr; 2728 2729 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2730 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2731 /* get async_ctx for each ULP */ 2732 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2733 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2734 (ulp_num * MEM_DESCR_OFFSET)); 2735 2736 phwi_ctrlr = phba->phwi_ctrlr; 2737 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2738 (struct hd_async_context *) 2739 mem_descr->mem_array[0].virtual_address; 2740 2741 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2742 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2743 2744 pasync_ctx->async_entry = 2745 (struct hd_async_entry *) 2746 ((long unsigned int)pasync_ctx + 2747 sizeof(struct hd_async_context)); 2748 2749 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba, 2750 ulp_num); 2751 /* setup header buffers */ 2752 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2753 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2754 (ulp_num * MEM_DESCR_OFFSET); 2755 if (mem_descr->mem_array[0].virtual_address) { 2756 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2757 "BM_%d : hwi_init_async_pdu_ctx" 2758 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2759 ulp_num, 2760 mem_descr->mem_array[0]. 2761 virtual_address); 2762 } else 2763 beiscsi_log(phba, KERN_WARNING, 2764 BEISCSI_LOG_INIT, 2765 "BM_%d : No Virtual address for ULP : %d\n", 2766 ulp_num); 2767 2768 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz; 2769 pasync_ctx->async_header.va_base = 2770 mem_descr->mem_array[0].virtual_address; 2771 2772 pasync_ctx->async_header.pa_base.u.a64.address = 2773 mem_descr->mem_array[0]. 2774 bus_address.u.a64.address; 2775 2776 /* setup header buffer sgls */ 2777 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2778 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2779 (ulp_num * MEM_DESCR_OFFSET); 2780 if (mem_descr->mem_array[0].virtual_address) { 2781 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2782 "BM_%d : hwi_init_async_pdu_ctx" 2783 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 2784 ulp_num, 2785 mem_descr->mem_array[0]. 2786 virtual_address); 2787 } else 2788 beiscsi_log(phba, KERN_WARNING, 2789 BEISCSI_LOG_INIT, 2790 "BM_%d : No Virtual address for ULP : %d\n", 2791 ulp_num); 2792 2793 pasync_ctx->async_header.ring_base = 2794 mem_descr->mem_array[0].virtual_address; 2795 2796 /* setup header buffer handles */ 2797 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2798 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2799 (ulp_num * MEM_DESCR_OFFSET); 2800 if (mem_descr->mem_array[0].virtual_address) { 2801 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2802 "BM_%d : hwi_init_async_pdu_ctx" 2803 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 2804 ulp_num, 2805 mem_descr->mem_array[0]. 2806 virtual_address); 2807 } else 2808 beiscsi_log(phba, KERN_WARNING, 2809 BEISCSI_LOG_INIT, 2810 "BM_%d : No Virtual address for ULP : %d\n", 2811 ulp_num); 2812 2813 pasync_ctx->async_header.handle_base = 2814 mem_descr->mem_array[0].virtual_address; 2815 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); 2816 2817 /* setup data buffer sgls */ 2818 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2819 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 2820 (ulp_num * MEM_DESCR_OFFSET); 2821 if (mem_descr->mem_array[0].virtual_address) { 2822 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2823 "BM_%d : hwi_init_async_pdu_ctx" 2824 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 2825 ulp_num, 2826 mem_descr->mem_array[0]. 2827 virtual_address); 2828 } else 2829 beiscsi_log(phba, KERN_WARNING, 2830 BEISCSI_LOG_INIT, 2831 "BM_%d : No Virtual address for ULP : %d\n", 2832 ulp_num); 2833 2834 pasync_ctx->async_data.ring_base = 2835 mem_descr->mem_array[0].virtual_address; 2836 2837 /* setup data buffer handles */ 2838 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2839 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2840 (ulp_num * MEM_DESCR_OFFSET); 2841 if (!mem_descr->mem_array[0].virtual_address) 2842 beiscsi_log(phba, KERN_WARNING, 2843 BEISCSI_LOG_INIT, 2844 "BM_%d : No Virtual address for ULP : %d\n", 2845 ulp_num); 2846 2847 pasync_ctx->async_data.handle_base = 2848 mem_descr->mem_array[0].virtual_address; 2849 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 2850 2851 pasync_header_h = 2852 (struct hd_async_handle *) 2853 pasync_ctx->async_header.handle_base; 2854 pasync_data_h = 2855 (struct hd_async_handle *) 2856 pasync_ctx->async_data.handle_base; 2857 2858 /* setup data buffers */ 2859 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2860 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2861 (ulp_num * MEM_DESCR_OFFSET); 2862 if (mem_descr->mem_array[0].virtual_address) { 2863 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2864 "BM_%d : hwi_init_async_pdu_ctx" 2865 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 2866 ulp_num, 2867 mem_descr->mem_array[0]. 2868 virtual_address); 2869 } else 2870 beiscsi_log(phba, KERN_WARNING, 2871 BEISCSI_LOG_INIT, 2872 "BM_%d : No Virtual address for ULP : %d\n", 2873 ulp_num); 2874 2875 idx = 0; 2876 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz; 2877 pasync_ctx->async_data.va_base = 2878 mem_descr->mem_array[idx].virtual_address; 2879 pasync_ctx->async_data.pa_base.u.a64.address = 2880 mem_descr->mem_array[idx]. 2881 bus_address.u.a64.address; 2882 2883 num_async_data = ((mem_descr->mem_array[idx].size) / 2884 phba->params.defpdu_data_sz); 2885 num_per_mem = 0; 2886 2887 for (index = 0; index < BEISCSI_GET_CID_COUNT 2888 (phba, ulp_num); index++) { 2889 pasync_header_h->cri = -1; 2890 pasync_header_h->is_header = 1; 2891 pasync_header_h->index = index; 2892 INIT_LIST_HEAD(&pasync_header_h->link); 2893 pasync_header_h->pbuffer = 2894 (void *)((unsigned long) 2895 (pasync_ctx-> 2896 async_header.va_base) + 2897 (p->defpdu_hdr_sz * index)); 2898 2899 pasync_header_h->pa.u.a64.address = 2900 pasync_ctx->async_header.pa_base.u.a64. 2901 address + (p->defpdu_hdr_sz * index); 2902 2903 list_add_tail(&pasync_header_h->link, 2904 &pasync_ctx->async_header. 2905 free_list); 2906 pasync_header_h++; 2907 pasync_ctx->async_header.free_entries++; 2908 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2909 wq.list); 2910 pasync_ctx->async_entry[index].header = NULL; 2911 2912 pasync_data_h->cri = -1; 2913 pasync_data_h->is_header = 0; 2914 pasync_data_h->index = index; 2915 INIT_LIST_HEAD(&pasync_data_h->link); 2916 2917 if (!num_async_data) { 2918 num_per_mem = 0; 2919 idx++; 2920 pasync_ctx->async_data.va_base = 2921 mem_descr->mem_array[idx]. 2922 virtual_address; 2923 pasync_ctx->async_data.pa_base.u. 2924 a64.address = 2925 mem_descr->mem_array[idx]. 2926 bus_address.u.a64.address; 2927 num_async_data = 2928 ((mem_descr->mem_array[idx]. 2929 size) / 2930 phba->params.defpdu_data_sz); 2931 } 2932 pasync_data_h->pbuffer = 2933 (void *)((unsigned long) 2934 (pasync_ctx->async_data.va_base) + 2935 (p->defpdu_data_sz * num_per_mem)); 2936 2937 pasync_data_h->pa.u.a64.address = 2938 pasync_ctx->async_data.pa_base.u.a64. 2939 address + (p->defpdu_data_sz * 2940 num_per_mem); 2941 num_per_mem++; 2942 num_async_data--; 2943 2944 list_add_tail(&pasync_data_h->link, 2945 &pasync_ctx->async_data. 2946 free_list); 2947 pasync_data_h++; 2948 pasync_ctx->async_data.free_entries++; 2949 pasync_ctx->async_entry[index].data = NULL; 2950 } 2951 } 2952 } 2953 2954 return 0; 2955 } 2956 2957 static int 2958 be_sgl_create_contiguous(void *virtual_address, 2959 u64 physical_address, u32 length, 2960 struct be_dma_mem *sgl) 2961 { 2962 WARN_ON(!virtual_address); 2963 WARN_ON(!physical_address); 2964 WARN_ON(!length); 2965 WARN_ON(!sgl); 2966 2967 sgl->va = virtual_address; 2968 sgl->dma = (unsigned long)physical_address; 2969 sgl->size = length; 2970 2971 return 0; 2972 } 2973 2974 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 2975 { 2976 memset(sgl, 0, sizeof(*sgl)); 2977 } 2978 2979 static void 2980 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 2981 struct mem_array *pmem, struct be_dma_mem *sgl) 2982 { 2983 if (sgl->va) 2984 be_sgl_destroy_contiguous(sgl); 2985 2986 be_sgl_create_contiguous(pmem->virtual_address, 2987 pmem->bus_address.u.a64.address, 2988 pmem->size, sgl); 2989 } 2990 2991 static void 2992 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 2993 struct mem_array *pmem, struct be_dma_mem *sgl) 2994 { 2995 if (sgl->va) 2996 be_sgl_destroy_contiguous(sgl); 2997 2998 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 2999 pmem->bus_address.u.a64.address, 3000 pmem->size, sgl); 3001 } 3002 3003 static int be_fill_queue(struct be_queue_info *q, 3004 u16 len, u16 entry_size, void *vaddress) 3005 { 3006 struct be_dma_mem *mem = &q->dma_mem; 3007 3008 memset(q, 0, sizeof(*q)); 3009 q->len = len; 3010 q->entry_size = entry_size; 3011 mem->size = len * entry_size; 3012 mem->va = vaddress; 3013 if (!mem->va) 3014 return -ENOMEM; 3015 memset(mem->va, 0, mem->size); 3016 return 0; 3017 } 3018 3019 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3020 struct hwi_context_memory *phwi_context) 3021 { 3022 int ret = -ENOMEM, eq_for_mcc; 3023 unsigned int i, num_eq_pages; 3024 struct be_queue_info *eq; 3025 struct be_dma_mem *mem; 3026 void *eq_vaddress; 3027 dma_addr_t paddr; 3028 3029 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ 3030 sizeof(struct be_eq_entry)); 3031 3032 if (phba->msix_enabled) 3033 eq_for_mcc = 1; 3034 else 3035 eq_for_mcc = 0; 3036 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3037 eq = &phwi_context->be_eq[i].q; 3038 mem = &eq->dma_mem; 3039 phwi_context->be_eq[i].phba = phba; 3040 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3041 num_eq_pages * PAGE_SIZE, 3042 &paddr); 3043 if (!eq_vaddress) 3044 goto create_eq_error; 3045 3046 mem->va = eq_vaddress; 3047 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3048 sizeof(struct be_eq_entry), eq_vaddress); 3049 if (ret) { 3050 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3051 "BM_%d : be_fill_queue Failed for EQ\n"); 3052 goto create_eq_error; 3053 } 3054 3055 mem->dma = paddr; 3056 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3057 phwi_context->cur_eqd); 3058 if (ret) { 3059 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3060 "BM_%d : beiscsi_cmd_eq_create" 3061 "Failed for EQ\n"); 3062 goto create_eq_error; 3063 } 3064 3065 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3066 "BM_%d : eqid = %d\n", 3067 phwi_context->be_eq[i].q.id); 3068 } 3069 return 0; 3070 3071 create_eq_error: 3072 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3073 eq = &phwi_context->be_eq[i].q; 3074 mem = &eq->dma_mem; 3075 if (mem->va) 3076 pci_free_consistent(phba->pcidev, num_eq_pages 3077 * PAGE_SIZE, 3078 mem->va, mem->dma); 3079 } 3080 return ret; 3081 } 3082 3083 static int beiscsi_create_cqs(struct beiscsi_hba *phba, 3084 struct hwi_context_memory *phwi_context) 3085 { 3086 unsigned int i, num_cq_pages; 3087 struct be_queue_info *cq, *eq; 3088 struct be_dma_mem *mem; 3089 struct be_eq_obj *pbe_eq; 3090 void *cq_vaddress; 3091 int ret = -ENOMEM; 3092 dma_addr_t paddr; 3093 3094 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 3095 sizeof(struct sol_cqe)); 3096 3097 for (i = 0; i < phba->num_cpus; i++) { 3098 cq = &phwi_context->be_cq[i]; 3099 eq = &phwi_context->be_eq[i].q; 3100 pbe_eq = &phwi_context->be_eq[i]; 3101 pbe_eq->cq = cq; 3102 pbe_eq->phba = phba; 3103 mem = &cq->dma_mem; 3104 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3105 num_cq_pages * PAGE_SIZE, 3106 &paddr); 3107 if (!cq_vaddress) 3108 goto create_cq_error; 3109 3110 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3111 sizeof(struct sol_cqe), cq_vaddress); 3112 if (ret) { 3113 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3114 "BM_%d : be_fill_queue Failed " 3115 "for ISCSI CQ\n"); 3116 goto create_cq_error; 3117 } 3118 3119 mem->dma = paddr; 3120 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 3121 false, 0); 3122 if (ret) { 3123 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3124 "BM_%d : beiscsi_cmd_eq_create" 3125 "Failed for ISCSI CQ\n"); 3126 goto create_cq_error; 3127 } 3128 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3129 "BM_%d : iscsi cq_id is %d for eq_id %d\n" 3130 "iSCSI CQ CREATED\n", cq->id, eq->id); 3131 } 3132 return 0; 3133 3134 create_cq_error: 3135 for (i = 0; i < phba->num_cpus; i++) { 3136 cq = &phwi_context->be_cq[i]; 3137 mem = &cq->dma_mem; 3138 if (mem->va) 3139 pci_free_consistent(phba->pcidev, num_cq_pages 3140 * PAGE_SIZE, 3141 mem->va, mem->dma); 3142 } 3143 return ret; 3144 } 3145 3146 static int 3147 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3148 struct hwi_context_memory *phwi_context, 3149 struct hwi_controller *phwi_ctrlr, 3150 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3151 { 3152 unsigned int idx; 3153 int ret; 3154 struct be_queue_info *dq, *cq; 3155 struct be_dma_mem *mem; 3156 struct be_mem_descriptor *mem_descr; 3157 void *dq_vaddress; 3158 3159 idx = 0; 3160 dq = &phwi_context->be_def_hdrq[ulp_num]; 3161 cq = &phwi_context->be_cq[0]; 3162 mem = &dq->dma_mem; 3163 mem_descr = phba->init_mem; 3164 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 3165 (ulp_num * MEM_DESCR_OFFSET); 3166 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3167 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3168 sizeof(struct phys_addr), 3169 sizeof(struct phys_addr), dq_vaddress); 3170 if (ret) { 3171 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3172 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", 3173 ulp_num); 3174 3175 return ret; 3176 } 3177 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3178 bus_address.u.a64.address; 3179 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3180 def_pdu_ring_sz, 3181 phba->params.defpdu_hdr_sz, 3182 BEISCSI_DEFQ_HDR, ulp_num); 3183 if (ret) { 3184 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3185 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3186 ulp_num); 3187 3188 return ret; 3189 } 3190 3191 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3192 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3193 ulp_num, 3194 phwi_context->be_def_hdrq[ulp_num].id); 3195 return 0; 3196 } 3197 3198 static int 3199 beiscsi_create_def_data(struct beiscsi_hba *phba, 3200 struct hwi_context_memory *phwi_context, 3201 struct hwi_controller *phwi_ctrlr, 3202 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3203 { 3204 unsigned int idx; 3205 int ret; 3206 struct be_queue_info *dataq, *cq; 3207 struct be_dma_mem *mem; 3208 struct be_mem_descriptor *mem_descr; 3209 void *dq_vaddress; 3210 3211 idx = 0; 3212 dataq = &phwi_context->be_def_dataq[ulp_num]; 3213 cq = &phwi_context->be_cq[0]; 3214 mem = &dataq->dma_mem; 3215 mem_descr = phba->init_mem; 3216 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3217 (ulp_num * MEM_DESCR_OFFSET); 3218 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3219 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3220 sizeof(struct phys_addr), 3221 sizeof(struct phys_addr), dq_vaddress); 3222 if (ret) { 3223 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3224 "BM_%d : be_fill_queue Failed for DEF PDU " 3225 "DATA on ULP : %d\n", 3226 ulp_num); 3227 3228 return ret; 3229 } 3230 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3231 bus_address.u.a64.address; 3232 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3233 def_pdu_ring_sz, 3234 phba->params.defpdu_data_sz, 3235 BEISCSI_DEFQ_DATA, ulp_num); 3236 if (ret) { 3237 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3238 "BM_%d be_cmd_create_default_pdu_queue" 3239 " Failed for DEF PDU DATA on ULP : %d\n", 3240 ulp_num); 3241 return ret; 3242 } 3243 3244 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3245 "BM_%d : iscsi def data id on ULP : %d is %d\n", 3246 ulp_num, 3247 phwi_context->be_def_dataq[ulp_num].id); 3248 3249 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3250 "BM_%d : DEFAULT PDU DATA RING CREATED" 3251 "on ULP : %d\n", ulp_num); 3252 return 0; 3253 } 3254 3255 3256 static int 3257 beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3258 { 3259 struct be_mem_descriptor *mem_descr; 3260 struct mem_array *pm_arr; 3261 struct be_dma_mem sgl; 3262 int status, ulp_num; 3263 3264 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3265 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3266 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3267 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3268 (ulp_num * MEM_DESCR_OFFSET); 3269 pm_arr = mem_descr->mem_array; 3270 3271 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3272 status = be_cmd_iscsi_post_template_hdr( 3273 &phba->ctrl, &sgl); 3274 3275 if (status != 0) { 3276 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3277 "BM_%d : Post Template HDR Failed for" 3278 "ULP_%d\n", ulp_num); 3279 return status; 3280 } 3281 3282 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3283 "BM_%d : Template HDR Pages Posted for" 3284 "ULP_%d\n", ulp_num); 3285 } 3286 } 3287 return 0; 3288 } 3289 3290 static int 3291 beiscsi_post_pages(struct beiscsi_hba *phba) 3292 { 3293 struct be_mem_descriptor *mem_descr; 3294 struct mem_array *pm_arr; 3295 unsigned int page_offset, i; 3296 struct be_dma_mem sgl; 3297 int status, ulp_num = 0; 3298 3299 mem_descr = phba->init_mem; 3300 mem_descr += HWI_MEM_SGE; 3301 pm_arr = mem_descr->mem_array; 3302 3303 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3304 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3305 break; 3306 3307 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3308 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3309 for (i = 0; i < mem_descr->num_elements; i++) { 3310 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3311 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3312 page_offset, 3313 (pm_arr->size / PAGE_SIZE)); 3314 page_offset += pm_arr->size / PAGE_SIZE; 3315 if (status != 0) { 3316 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3317 "BM_%d : post sgl failed.\n"); 3318 return status; 3319 } 3320 pm_arr++; 3321 } 3322 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3323 "BM_%d : POSTED PAGES\n"); 3324 return 0; 3325 } 3326 3327 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3328 { 3329 struct be_dma_mem *mem = &q->dma_mem; 3330 if (mem->va) { 3331 pci_free_consistent(phba->pcidev, mem->size, 3332 mem->va, mem->dma); 3333 mem->va = NULL; 3334 } 3335 } 3336 3337 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3338 u16 len, u16 entry_size) 3339 { 3340 struct be_dma_mem *mem = &q->dma_mem; 3341 3342 memset(q, 0, sizeof(*q)); 3343 q->len = len; 3344 q->entry_size = entry_size; 3345 mem->size = len * entry_size; 3346 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); 3347 if (!mem->va) 3348 return -ENOMEM; 3349 return 0; 3350 } 3351 3352 static int 3353 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3354 struct hwi_context_memory *phwi_context, 3355 struct hwi_controller *phwi_ctrlr) 3356 { 3357 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 3358 u64 pa_addr_lo; 3359 unsigned int idx, num, i, ulp_num; 3360 struct mem_array *pwrb_arr; 3361 void *wrb_vaddr; 3362 struct be_dma_mem sgl; 3363 struct be_mem_descriptor *mem_descr; 3364 struct hwi_wrb_context *pwrb_context; 3365 int status; 3366 uint8_t ulp_count = 0, ulp_base_num = 0; 3367 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3368 3369 idx = 0; 3370 mem_descr = phba->init_mem; 3371 mem_descr += HWI_MEM_WRB; 3372 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl, 3373 GFP_KERNEL); 3374 if (!pwrb_arr) { 3375 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3376 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3377 return -ENOMEM; 3378 } 3379 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3380 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3381 num_wrb_rings = mem_descr->mem_array[idx].size / 3382 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3383 3384 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3385 if (num_wrb_rings) { 3386 pwrb_arr[num].virtual_address = wrb_vaddr; 3387 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3388 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3389 sizeof(struct iscsi_wrb); 3390 wrb_vaddr += pwrb_arr[num].size; 3391 pa_addr_lo += pwrb_arr[num].size; 3392 num_wrb_rings--; 3393 } else { 3394 idx++; 3395 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3396 pa_addr_lo = mem_descr->mem_array[idx].\ 3397 bus_address.u.a64.address; 3398 num_wrb_rings = mem_descr->mem_array[idx].size / 3399 (phba->params.wrbs_per_cxn * 3400 sizeof(struct iscsi_wrb)); 3401 pwrb_arr[num].virtual_address = wrb_vaddr; 3402 pwrb_arr[num].bus_address.u.a64.address\ 3403 = pa_addr_lo; 3404 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3405 sizeof(struct iscsi_wrb); 3406 wrb_vaddr += pwrb_arr[num].size; 3407 pa_addr_lo += pwrb_arr[num].size; 3408 num_wrb_rings--; 3409 } 3410 } 3411 3412 /* Get the ULP Count */ 3413 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3414 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3415 ulp_count++; 3416 ulp_base_num = ulp_num; 3417 cid_count_ulp[ulp_num] = 3418 BEISCSI_GET_CID_COUNT(phba, ulp_num); 3419 } 3420 3421 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3422 wrb_mem_index = 0; 3423 offset = 0; 3424 size = 0; 3425 3426 if (ulp_count > 1) { 3427 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3428 3429 if (!cid_count_ulp[ulp_base_num]) 3430 ulp_base_num = (ulp_base_num + 1) % 3431 BEISCSI_ULP_COUNT; 3432 3433 cid_count_ulp[ulp_base_num]--; 3434 } 3435 3436 3437 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3438 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3439 &phwi_context->be_wrbq[i], 3440 &phwi_ctrlr->wrb_context[i], 3441 ulp_base_num); 3442 if (status != 0) { 3443 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3444 "BM_%d : wrbq create failed."); 3445 kfree(pwrb_arr); 3446 return status; 3447 } 3448 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3449 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3450 } 3451 kfree(pwrb_arr); 3452 return 0; 3453 } 3454 3455 static void free_wrb_handles(struct beiscsi_hba *phba) 3456 { 3457 unsigned int index; 3458 struct hwi_controller *phwi_ctrlr; 3459 struct hwi_wrb_context *pwrb_context; 3460 3461 phwi_ctrlr = phba->phwi_ctrlr; 3462 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 3463 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3464 kfree(pwrb_context->pwrb_handle_base); 3465 kfree(pwrb_context->pwrb_handle_basestd); 3466 } 3467 } 3468 3469 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3470 { 3471 struct be_ctrl_info *ctrl = &phba->ctrl; 3472 struct be_dma_mem *ptag_mem; 3473 struct be_queue_info *q; 3474 int i, tag; 3475 3476 q = &phba->ctrl.mcc_obj.q; 3477 for (i = 0; i < MAX_MCC_CMD; i++) { 3478 tag = i + 1; 3479 if (!test_bit(MCC_TAG_STATE_RUNNING, 3480 &ctrl->ptag_state[tag].tag_state)) 3481 continue; 3482 3483 if (test_bit(MCC_TAG_STATE_TIMEOUT, 3484 &ctrl->ptag_state[tag].tag_state)) { 3485 ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; 3486 if (ptag_mem->size) { 3487 pci_free_consistent(ctrl->pdev, 3488 ptag_mem->size, 3489 ptag_mem->va, 3490 ptag_mem->dma); 3491 ptag_mem->size = 0; 3492 } 3493 continue; 3494 } 3495 /** 3496 * If MCC is still active and waiting then wake up the process. 3497 * We are here only because port is going offline. The process 3498 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is 3499 * returned for the operation and allocated memory cleaned up. 3500 */ 3501 if (waitqueue_active(&ctrl->mcc_wait[tag])) { 3502 ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED; 3503 ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK; 3504 wake_up_interruptible(&ctrl->mcc_wait[tag]); 3505 /* 3506 * Control tag info gets reinitialized in enable 3507 * so wait for the process to clear running state. 3508 */ 3509 while (test_bit(MCC_TAG_STATE_RUNNING, 3510 &ctrl->ptag_state[tag].tag_state)) 3511 schedule_timeout_uninterruptible(HZ); 3512 } 3513 /** 3514 * For MCC with tag_states MCC_TAG_STATE_ASYNC and 3515 * MCC_TAG_STATE_IGNORE nothing needs to done. 3516 */ 3517 } 3518 if (q->created) { 3519 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3520 be_queue_free(phba, q); 3521 } 3522 3523 q = &phba->ctrl.mcc_obj.cq; 3524 if (q->created) { 3525 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3526 be_queue_free(phba, q); 3527 } 3528 } 3529 3530 static int be_mcc_queues_create(struct beiscsi_hba *phba, 3531 struct hwi_context_memory *phwi_context) 3532 { 3533 struct be_queue_info *q, *cq; 3534 struct be_ctrl_info *ctrl = &phba->ctrl; 3535 3536 /* Alloc MCC compl queue */ 3537 cq = &phba->ctrl.mcc_obj.cq; 3538 if (be_queue_alloc(phba, cq, MCC_CQ_LEN, 3539 sizeof(struct be_mcc_compl))) 3540 goto err; 3541 /* Ask BE to create MCC compl queue; */ 3542 if (phba->msix_enabled) { 3543 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq 3544 [phba->num_cpus].q, false, true, 0)) 3545 goto mcc_cq_free; 3546 } else { 3547 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3548 false, true, 0)) 3549 goto mcc_cq_free; 3550 } 3551 3552 /* Alloc MCC queue */ 3553 q = &phba->ctrl.mcc_obj.q; 3554 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) 3555 goto mcc_cq_destroy; 3556 3557 /* Ask BE to create MCC queue */ 3558 if (beiscsi_cmd_mccq_create(phba, q, cq)) 3559 goto mcc_q_free; 3560 3561 return 0; 3562 3563 mcc_q_free: 3564 be_queue_free(phba, q); 3565 mcc_cq_destroy: 3566 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); 3567 mcc_cq_free: 3568 be_queue_free(phba, cq); 3569 err: 3570 return -ENOMEM; 3571 } 3572 3573 /** 3574 * find_num_cpus()- Get the CPU online count 3575 * @phba: ptr to priv structure 3576 * 3577 * CPU count is used for creating EQ. 3578 **/ 3579 static void find_num_cpus(struct beiscsi_hba *phba) 3580 { 3581 int num_cpus = 0; 3582 3583 num_cpus = num_online_cpus(); 3584 3585 switch (phba->generation) { 3586 case BE_GEN2: 3587 case BE_GEN3: 3588 phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ? 3589 BEISCSI_MAX_NUM_CPUS : num_cpus; 3590 break; 3591 case BE_GEN4: 3592 /* 3593 * If eqid_count == 1 fall back to 3594 * INTX mechanism 3595 **/ 3596 if (phba->fw_config.eqid_count == 1) { 3597 enable_msix = 0; 3598 phba->num_cpus = 1; 3599 return; 3600 } 3601 3602 phba->num_cpus = 3603 (num_cpus > (phba->fw_config.eqid_count - 1)) ? 3604 (phba->fw_config.eqid_count - 1) : num_cpus; 3605 break; 3606 default: 3607 phba->num_cpus = 1; 3608 } 3609 } 3610 3611 static void hwi_purge_eq(struct beiscsi_hba *phba) 3612 { 3613 struct hwi_controller *phwi_ctrlr; 3614 struct hwi_context_memory *phwi_context; 3615 struct be_queue_info *eq; 3616 struct be_eq_entry *eqe = NULL; 3617 int i, eq_msix; 3618 unsigned int num_processed; 3619 3620 if (beiscsi_hba_in_error(phba)) 3621 return; 3622 3623 phwi_ctrlr = phba->phwi_ctrlr; 3624 phwi_context = phwi_ctrlr->phwi_ctxt; 3625 if (phba->msix_enabled) 3626 eq_msix = 1; 3627 else 3628 eq_msix = 0; 3629 3630 for (i = 0; i < (phba->num_cpus + eq_msix); i++) { 3631 eq = &phwi_context->be_eq[i].q; 3632 eqe = queue_tail_node(eq); 3633 num_processed = 0; 3634 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 3635 & EQE_VALID_MASK) { 3636 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 3637 queue_tail_inc(eq); 3638 eqe = queue_tail_node(eq); 3639 num_processed++; 3640 } 3641 3642 if (num_processed) 3643 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); 3644 } 3645 } 3646 3647 static void hwi_cleanup_port(struct beiscsi_hba *phba) 3648 { 3649 struct be_queue_info *q; 3650 struct be_ctrl_info *ctrl = &phba->ctrl; 3651 struct hwi_controller *phwi_ctrlr; 3652 struct hwi_context_memory *phwi_context; 3653 struct hd_async_context *pasync_ctx; 3654 int i, eq_for_mcc, ulp_num; 3655 3656 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3657 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3658 beiscsi_cmd_iscsi_cleanup(phba, ulp_num); 3659 3660 /** 3661 * Purge all EQ entries that may have been left out. This is to 3662 * workaround a problem we've seen occasionally where driver gets an 3663 * interrupt with EQ entry bit set after stopping the controller. 3664 */ 3665 hwi_purge_eq(phba); 3666 3667 phwi_ctrlr = phba->phwi_ctrlr; 3668 phwi_context = phwi_ctrlr->phwi_ctxt; 3669 3670 be_cmd_iscsi_remove_template_hdr(ctrl); 3671 3672 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3673 q = &phwi_context->be_wrbq[i]; 3674 if (q->created) 3675 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3676 } 3677 kfree(phwi_context->be_wrbq); 3678 free_wrb_handles(phba); 3679 3680 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3681 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3682 3683 q = &phwi_context->be_def_hdrq[ulp_num]; 3684 if (q->created) 3685 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3686 3687 q = &phwi_context->be_def_dataq[ulp_num]; 3688 if (q->created) 3689 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3690 3691 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 3692 } 3693 } 3694 3695 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3696 3697 for (i = 0; i < (phba->num_cpus); i++) { 3698 q = &phwi_context->be_cq[i]; 3699 if (q->created) { 3700 be_queue_free(phba, q); 3701 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3702 } 3703 } 3704 3705 be_mcc_queues_destroy(phba); 3706 if (phba->msix_enabled) 3707 eq_for_mcc = 1; 3708 else 3709 eq_for_mcc = 0; 3710 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3711 q = &phwi_context->be_eq[i].q; 3712 if (q->created) { 3713 be_queue_free(phba, q); 3714 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3715 } 3716 } 3717 /* this ensures complete FW cleanup */ 3718 beiscsi_cmd_function_reset(phba); 3719 /* last communication, indicate driver is unloading */ 3720 beiscsi_cmd_special_wrb(&phba->ctrl, 0); 3721 } 3722 3723 static int hwi_init_port(struct beiscsi_hba *phba) 3724 { 3725 struct hwi_controller *phwi_ctrlr; 3726 struct hwi_context_memory *phwi_context; 3727 unsigned int def_pdu_ring_sz; 3728 struct be_ctrl_info *ctrl = &phba->ctrl; 3729 int status, ulp_num; 3730 3731 phwi_ctrlr = phba->phwi_ctrlr; 3732 phwi_context = phwi_ctrlr->phwi_ctxt; 3733 phwi_context->max_eqd = 128; 3734 phwi_context->min_eqd = 0; 3735 phwi_context->cur_eqd = 32; 3736 /* set port optic state to unknown */ 3737 phba->optic_state = 0xff; 3738 3739 status = beiscsi_create_eqs(phba, phwi_context); 3740 if (status != 0) { 3741 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3742 "BM_%d : EQ not created\n"); 3743 goto error; 3744 } 3745 3746 status = be_mcc_queues_create(phba, phwi_context); 3747 if (status != 0) 3748 goto error; 3749 3750 status = beiscsi_check_supported_fw(ctrl, phba); 3751 if (status != 0) { 3752 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3753 "BM_%d : Unsupported fw version\n"); 3754 goto error; 3755 } 3756 3757 status = beiscsi_create_cqs(phba, phwi_context); 3758 if (status != 0) { 3759 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3760 "BM_%d : CQ not created\n"); 3761 goto error; 3762 } 3763 3764 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3765 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3766 def_pdu_ring_sz = 3767 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 3768 sizeof(struct phys_addr); 3769 3770 status = beiscsi_create_def_hdr(phba, phwi_context, 3771 phwi_ctrlr, 3772 def_pdu_ring_sz, 3773 ulp_num); 3774 if (status != 0) { 3775 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3776 "BM_%d : Default Header not created for ULP : %d\n", 3777 ulp_num); 3778 goto error; 3779 } 3780 3781 status = beiscsi_create_def_data(phba, phwi_context, 3782 phwi_ctrlr, 3783 def_pdu_ring_sz, 3784 ulp_num); 3785 if (status != 0) { 3786 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3787 "BM_%d : Default Data not created for ULP : %d\n", 3788 ulp_num); 3789 goto error; 3790 } 3791 /** 3792 * Now that the default PDU rings have been created, 3793 * let EP know about it. 3794 * Call beiscsi_cmd_iscsi_cleanup before posting? 3795 */ 3796 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, 3797 ulp_num); 3798 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, 3799 ulp_num); 3800 } 3801 } 3802 3803 status = beiscsi_post_pages(phba); 3804 if (status != 0) { 3805 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3806 "BM_%d : Post SGL Pages Failed\n"); 3807 goto error; 3808 } 3809 3810 status = beiscsi_post_template_hdr(phba); 3811 if (status != 0) { 3812 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3813 "BM_%d : Template HDR Posting for CXN Failed\n"); 3814 } 3815 3816 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3817 if (status != 0) { 3818 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3819 "BM_%d : WRB Rings not created\n"); 3820 goto error; 3821 } 3822 3823 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3824 uint16_t async_arr_idx = 0; 3825 3826 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3827 uint16_t cri = 0; 3828 struct hd_async_context *pasync_ctx; 3829 3830 pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3831 phwi_ctrlr, ulp_num); 3832 for (cri = 0; cri < 3833 phba->params.cxns_per_ctrl; cri++) { 3834 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI 3835 (phwi_ctrlr, cri)) 3836 pasync_ctx->cid_to_async_cri_map[ 3837 phwi_ctrlr->wrb_context[cri].cid] = 3838 async_arr_idx++; 3839 } 3840 /** 3841 * Now that the default PDU rings have been created, 3842 * let EP know about it. 3843 */ 3844 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, 3845 ulp_num); 3846 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, 3847 ulp_num); 3848 } 3849 } 3850 3851 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3852 "BM_%d : hwi_init_port success\n"); 3853 return 0; 3854 3855 error: 3856 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3857 "BM_%d : hwi_init_port failed"); 3858 hwi_cleanup_port(phba); 3859 return status; 3860 } 3861 3862 static int hwi_init_controller(struct beiscsi_hba *phba) 3863 { 3864 struct hwi_controller *phwi_ctrlr; 3865 3866 phwi_ctrlr = phba->phwi_ctrlr; 3867 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3868 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3869 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3870 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3871 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3872 phwi_ctrlr->phwi_ctxt); 3873 } else { 3874 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3875 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3876 "than one element.Failing to load\n"); 3877 return -ENOMEM; 3878 } 3879 3880 iscsi_init_global_templates(phba); 3881 if (beiscsi_init_wrb_handle(phba)) 3882 return -ENOMEM; 3883 3884 if (hwi_init_async_pdu_ctx(phba)) { 3885 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3886 "BM_%d : hwi_init_async_pdu_ctx failed\n"); 3887 return -ENOMEM; 3888 } 3889 3890 if (hwi_init_port(phba) != 0) { 3891 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3892 "BM_%d : hwi_init_controller failed\n"); 3893 3894 return -ENOMEM; 3895 } 3896 return 0; 3897 } 3898 3899 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3900 { 3901 struct be_mem_descriptor *mem_descr; 3902 int i, j; 3903 3904 mem_descr = phba->init_mem; 3905 i = 0; 3906 j = 0; 3907 for (i = 0; i < SE_MEM_MAX; i++) { 3908 for (j = mem_descr->num_elements; j > 0; j--) { 3909 pci_free_consistent(phba->pcidev, 3910 mem_descr->mem_array[j - 1].size, 3911 mem_descr->mem_array[j - 1].virtual_address, 3912 (unsigned long)mem_descr->mem_array[j - 1]. 3913 bus_address.u.a64.address); 3914 } 3915 3916 kfree(mem_descr->mem_array); 3917 mem_descr++; 3918 } 3919 kfree(phba->init_mem); 3920 kfree(phba->phwi_ctrlr->wrb_context); 3921 kfree(phba->phwi_ctrlr); 3922 } 3923 3924 static int beiscsi_init_controller(struct beiscsi_hba *phba) 3925 { 3926 int ret = -ENOMEM; 3927 3928 ret = beiscsi_get_memory(phba); 3929 if (ret < 0) { 3930 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3931 "BM_%d : beiscsi_dev_probe -" 3932 "Failed in beiscsi_alloc_memory\n"); 3933 return ret; 3934 } 3935 3936 ret = hwi_init_controller(phba); 3937 if (ret) 3938 goto free_init; 3939 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3940 "BM_%d : Return success from beiscsi_init_controller"); 3941 3942 return 0; 3943 3944 free_init: 3945 beiscsi_free_mem(phba); 3946 return ret; 3947 } 3948 3949 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3950 { 3951 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3952 struct sgl_handle *psgl_handle; 3953 struct iscsi_sge *pfrag; 3954 unsigned int arr_index, i, idx; 3955 unsigned int ulp_icd_start, ulp_num = 0; 3956 3957 phba->io_sgl_hndl_avbl = 0; 3958 phba->eh_sgl_hndl_avbl = 0; 3959 3960 mem_descr_sglh = phba->init_mem; 3961 mem_descr_sglh += HWI_MEM_SGLH; 3962 if (1 == mem_descr_sglh->num_elements) { 3963 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3964 phba->params.ios_per_ctrl, 3965 GFP_KERNEL); 3966 if (!phba->io_sgl_hndl_base) { 3967 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3968 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3969 return -ENOMEM; 3970 } 3971 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3972 (phba->params.icds_per_ctrl - 3973 phba->params.ios_per_ctrl), 3974 GFP_KERNEL); 3975 if (!phba->eh_sgl_hndl_base) { 3976 kfree(phba->io_sgl_hndl_base); 3977 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3978 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3979 return -ENOMEM; 3980 } 3981 } else { 3982 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3983 "BM_%d : HWI_MEM_SGLH is more than one element." 3984 "Failing to load\n"); 3985 return -ENOMEM; 3986 } 3987 3988 arr_index = 0; 3989 idx = 0; 3990 while (idx < mem_descr_sglh->num_elements) { 3991 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3992 3993 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3994 sizeof(struct sgl_handle)); i++) { 3995 if (arr_index < phba->params.ios_per_ctrl) { 3996 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3997 phba->io_sgl_hndl_avbl++; 3998 arr_index++; 3999 } else { 4000 phba->eh_sgl_hndl_base[arr_index - 4001 phba->params.ios_per_ctrl] = 4002 psgl_handle; 4003 arr_index++; 4004 phba->eh_sgl_hndl_avbl++; 4005 } 4006 psgl_handle++; 4007 } 4008 idx++; 4009 } 4010 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4011 "BM_%d : phba->io_sgl_hndl_avbl=%d" 4012 "phba->eh_sgl_hndl_avbl=%d\n", 4013 phba->io_sgl_hndl_avbl, 4014 phba->eh_sgl_hndl_avbl); 4015 4016 mem_descr_sg = phba->init_mem; 4017 mem_descr_sg += HWI_MEM_SGE; 4018 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4019 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 4020 mem_descr_sg->num_elements); 4021 4022 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 4023 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 4024 break; 4025 4026 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 4027 4028 arr_index = 0; 4029 idx = 0; 4030 while (idx < mem_descr_sg->num_elements) { 4031 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 4032 4033 for (i = 0; 4034 i < (mem_descr_sg->mem_array[idx].size) / 4035 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 4036 i++) { 4037 if (arr_index < phba->params.ios_per_ctrl) 4038 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 4039 else 4040 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 4041 phba->params.ios_per_ctrl]; 4042 psgl_handle->pfrag = pfrag; 4043 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 4044 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 4045 pfrag += phba->params.num_sge_per_io; 4046 psgl_handle->sgl_index = ulp_icd_start + arr_index++; 4047 } 4048 idx++; 4049 } 4050 phba->io_sgl_free_index = 0; 4051 phba->io_sgl_alloc_index = 0; 4052 phba->eh_sgl_free_index = 0; 4053 phba->eh_sgl_alloc_index = 0; 4054 return 0; 4055 } 4056 4057 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 4058 { 4059 int ret; 4060 uint16_t i, ulp_num; 4061 struct ulp_cid_info *ptr_cid_info = NULL; 4062 4063 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4064 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4065 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), 4066 GFP_KERNEL); 4067 4068 if (!ptr_cid_info) { 4069 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4070 "BM_%d : Failed to allocate memory" 4071 "for ULP_CID_INFO for ULP : %d\n", 4072 ulp_num); 4073 ret = -ENOMEM; 4074 goto free_memory; 4075 4076 } 4077 4078 /* Allocate memory for CID array */ 4079 ptr_cid_info->cid_array = kzalloc(sizeof(void *) * 4080 BEISCSI_GET_CID_COUNT(phba, 4081 ulp_num), GFP_KERNEL); 4082 if (!ptr_cid_info->cid_array) { 4083 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4084 "BM_%d : Failed to allocate memory" 4085 "for CID_ARRAY for ULP : %d\n", 4086 ulp_num); 4087 kfree(ptr_cid_info); 4088 ptr_cid_info = NULL; 4089 ret = -ENOMEM; 4090 4091 goto free_memory; 4092 } 4093 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( 4094 phba, ulp_num); 4095 4096 /* Save the cid_info_array ptr */ 4097 phba->cid_array_info[ulp_num] = ptr_cid_info; 4098 } 4099 } 4100 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 4101 phba->params.cxns_per_ctrl, GFP_KERNEL); 4102 if (!phba->ep_array) { 4103 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4104 "BM_%d : Failed to allocate memory in " 4105 "hba_setup_cid_tbls\n"); 4106 ret = -ENOMEM; 4107 4108 goto free_memory; 4109 } 4110 4111 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * 4112 phba->params.cxns_per_ctrl, GFP_KERNEL); 4113 if (!phba->conn_table) { 4114 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4115 "BM_%d : Failed to allocate memory in" 4116 "hba_setup_cid_tbls\n"); 4117 4118 kfree(phba->ep_array); 4119 phba->ep_array = NULL; 4120 ret = -ENOMEM; 4121 4122 goto free_memory; 4123 } 4124 4125 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4126 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4127 4128 ptr_cid_info = phba->cid_array_info[ulp_num]; 4129 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 4130 phba->phwi_ctrlr->wrb_context[i].cid; 4131 4132 } 4133 4134 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4135 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4136 ptr_cid_info = phba->cid_array_info[ulp_num]; 4137 4138 ptr_cid_info->cid_alloc = 0; 4139 ptr_cid_info->cid_free = 0; 4140 } 4141 } 4142 return 0; 4143 4144 free_memory: 4145 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4146 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4147 ptr_cid_info = phba->cid_array_info[ulp_num]; 4148 4149 if (ptr_cid_info) { 4150 kfree(ptr_cid_info->cid_array); 4151 kfree(ptr_cid_info); 4152 phba->cid_array_info[ulp_num] = NULL; 4153 } 4154 } 4155 } 4156 4157 return ret; 4158 } 4159 4160 static void hwi_enable_intr(struct beiscsi_hba *phba) 4161 { 4162 struct be_ctrl_info *ctrl = &phba->ctrl; 4163 struct hwi_controller *phwi_ctrlr; 4164 struct hwi_context_memory *phwi_context; 4165 struct be_queue_info *eq; 4166 u8 __iomem *addr; 4167 u32 reg, i; 4168 u32 enabled; 4169 4170 phwi_ctrlr = phba->phwi_ctrlr; 4171 phwi_context = phwi_ctrlr->phwi_ctxt; 4172 4173 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 4174 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 4175 reg = ioread32(addr); 4176 4177 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4178 if (!enabled) { 4179 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4180 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4181 "BM_%d : reg =x%08x addr=%p\n", reg, addr); 4182 iowrite32(reg, addr); 4183 } 4184 4185 if (!phba->msix_enabled) { 4186 eq = &phwi_context->be_eq[0].q; 4187 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4188 "BM_%d : eq->id=%d\n", eq->id); 4189 4190 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4191 } else { 4192 for (i = 0; i <= phba->num_cpus; i++) { 4193 eq = &phwi_context->be_eq[i].q; 4194 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4195 "BM_%d : eq->id=%d\n", eq->id); 4196 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4197 } 4198 } 4199 } 4200 4201 static void hwi_disable_intr(struct beiscsi_hba *phba) 4202 { 4203 struct be_ctrl_info *ctrl = &phba->ctrl; 4204 4205 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 4206 u32 reg = ioread32(addr); 4207 4208 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4209 if (enabled) { 4210 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4211 iowrite32(reg, addr); 4212 } else 4213 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4214 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 4215 } 4216 4217 static int beiscsi_init_port(struct beiscsi_hba *phba) 4218 { 4219 int ret; 4220 4221 ret = beiscsi_init_controller(phba); 4222 if (ret < 0) { 4223 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4224 "BM_%d : beiscsi_dev_probe - Failed in" 4225 "beiscsi_init_controller\n"); 4226 return ret; 4227 } 4228 ret = beiscsi_init_sgl_handle(phba); 4229 if (ret < 0) { 4230 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4231 "BM_%d : beiscsi_dev_probe - Failed in" 4232 "beiscsi_init_sgl_handle\n"); 4233 goto do_cleanup_ctrlr; 4234 } 4235 4236 ret = hba_setup_cid_tbls(phba); 4237 if (ret < 0) { 4238 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4239 "BM_%d : Failed in hba_setup_cid_tbls\n"); 4240 kfree(phba->io_sgl_hndl_base); 4241 kfree(phba->eh_sgl_hndl_base); 4242 goto do_cleanup_ctrlr; 4243 } 4244 4245 return ret; 4246 4247 do_cleanup_ctrlr: 4248 hwi_cleanup_port(phba); 4249 return ret; 4250 } 4251 4252 static void beiscsi_cleanup_port(struct beiscsi_hba *phba) 4253 { 4254 struct ulp_cid_info *ptr_cid_info = NULL; 4255 int ulp_num; 4256 4257 kfree(phba->io_sgl_hndl_base); 4258 kfree(phba->eh_sgl_hndl_base); 4259 kfree(phba->ep_array); 4260 kfree(phba->conn_table); 4261 4262 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4263 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4264 ptr_cid_info = phba->cid_array_info[ulp_num]; 4265 4266 if (ptr_cid_info) { 4267 kfree(ptr_cid_info->cid_array); 4268 kfree(ptr_cid_info); 4269 phba->cid_array_info[ulp_num] = NULL; 4270 } 4271 } 4272 } 4273 } 4274 4275 /** 4276 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources 4277 * @beiscsi_conn: ptr to the conn to be cleaned up 4278 * @task: ptr to iscsi_task resource to be freed. 4279 * 4280 * Free driver mgmt resources binded to CXN. 4281 **/ 4282 void 4283 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4284 struct iscsi_task *task) 4285 { 4286 struct beiscsi_io_task *io_task; 4287 struct beiscsi_hba *phba = beiscsi_conn->phba; 4288 struct hwi_wrb_context *pwrb_context; 4289 struct hwi_controller *phwi_ctrlr; 4290 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4291 beiscsi_conn->beiscsi_conn_cid); 4292 4293 phwi_ctrlr = phba->phwi_ctrlr; 4294 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4295 4296 io_task = task->dd_data; 4297 4298 if (io_task->pwrb_handle) { 4299 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4300 io_task->pwrb_handle = NULL; 4301 } 4302 4303 if (io_task->psgl_handle) { 4304 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4305 io_task->psgl_handle = NULL; 4306 } 4307 4308 if (io_task->mtask_addr) { 4309 pci_unmap_single(phba->pcidev, 4310 io_task->mtask_addr, 4311 io_task->mtask_data_count, 4312 PCI_DMA_TODEVICE); 4313 io_task->mtask_addr = 0; 4314 } 4315 } 4316 4317 /** 4318 * beiscsi_cleanup_task()- Free driver resources of the task 4319 * @task: ptr to the iscsi task 4320 * 4321 **/ 4322 static void beiscsi_cleanup_task(struct iscsi_task *task) 4323 { 4324 struct beiscsi_io_task *io_task = task->dd_data; 4325 struct iscsi_conn *conn = task->conn; 4326 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4327 struct beiscsi_hba *phba = beiscsi_conn->phba; 4328 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4329 struct hwi_wrb_context *pwrb_context; 4330 struct hwi_controller *phwi_ctrlr; 4331 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4332 beiscsi_conn->beiscsi_conn_cid); 4333 4334 phwi_ctrlr = phba->phwi_ctrlr; 4335 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4336 4337 if (io_task->cmd_bhs) { 4338 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4339 io_task->bhs_pa.u.a64.address); 4340 io_task->cmd_bhs = NULL; 4341 task->hdr = NULL; 4342 } 4343 4344 if (task->sc) { 4345 if (io_task->pwrb_handle) { 4346 free_wrb_handle(phba, pwrb_context, 4347 io_task->pwrb_handle); 4348 io_task->pwrb_handle = NULL; 4349 } 4350 4351 if (io_task->psgl_handle) { 4352 free_io_sgl_handle(phba, io_task->psgl_handle); 4353 io_task->psgl_handle = NULL; 4354 } 4355 4356 if (io_task->scsi_cmnd) { 4357 if (io_task->num_sg) 4358 scsi_dma_unmap(io_task->scsi_cmnd); 4359 io_task->scsi_cmnd = NULL; 4360 } 4361 } else { 4362 if (!beiscsi_conn->login_in_progress) 4363 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4364 } 4365 } 4366 4367 void 4368 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4369 struct beiscsi_offload_params *params) 4370 { 4371 struct wrb_handle *pwrb_handle; 4372 struct hwi_wrb_context *pwrb_context = NULL; 4373 struct beiscsi_hba *phba = beiscsi_conn->phba; 4374 struct iscsi_task *task = beiscsi_conn->task; 4375 struct iscsi_session *session = task->conn->session; 4376 u32 doorbell = 0; 4377 4378 /* 4379 * We can always use 0 here because it is reserved by libiscsi for 4380 * login/startup related tasks. 4381 */ 4382 beiscsi_conn->login_in_progress = 0; 4383 spin_lock_bh(&session->back_lock); 4384 beiscsi_cleanup_task(task); 4385 spin_unlock_bh(&session->back_lock); 4386 4387 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 4388 &pwrb_context); 4389 4390 /* Check for the adapter family */ 4391 if (is_chip_be2_be3r(phba)) 4392 beiscsi_offload_cxn_v0(params, pwrb_handle, 4393 phba->init_mem, 4394 pwrb_context); 4395 else 4396 beiscsi_offload_cxn_v2(params, pwrb_handle, 4397 pwrb_context); 4398 4399 be_dws_le_to_cpu(pwrb_handle->pwrb, 4400 sizeof(struct iscsi_target_context_update_wrb)); 4401 4402 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4403 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4404 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4405 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4406 iowrite32(doorbell, phba->db_va + 4407 beiscsi_conn->doorbell_offset); 4408 4409 /* 4410 * There is no completion for CONTEXT_UPDATE. The completion of next 4411 * WRB posted guarantees FW's processing and DMA'ing of it. 4412 * Use beiscsi_put_wrb_handle to put it back in the pool which makes 4413 * sure zero'ing or reuse of the WRB only after wrbs_per_cxn. 4414 */ 4415 beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, 4416 phba->params.wrbs_per_cxn); 4417 beiscsi_log(phba, KERN_INFO, 4418 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4419 "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n", 4420 pwrb_handle, pwrb_context->free_index, 4421 pwrb_context->wrb_handles_available); 4422 } 4423 4424 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4425 int *index, int *age) 4426 { 4427 *index = (int)itt; 4428 if (age) 4429 *age = conn->session->age; 4430 } 4431 4432 /** 4433 * beiscsi_alloc_pdu - allocates pdu and related resources 4434 * @task: libiscsi task 4435 * @opcode: opcode of pdu for task 4436 * 4437 * This is called with the session lock held. It will allocate 4438 * the wrb and sgl if needed for the command. And it will prep 4439 * the pdu's itt. beiscsi_parse_pdu will later translate 4440 * the pdu itt to the libiscsi task itt. 4441 */ 4442 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4443 { 4444 struct beiscsi_io_task *io_task = task->dd_data; 4445 struct iscsi_conn *conn = task->conn; 4446 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4447 struct beiscsi_hba *phba = beiscsi_conn->phba; 4448 struct hwi_wrb_context *pwrb_context; 4449 struct hwi_controller *phwi_ctrlr; 4450 itt_t itt; 4451 uint16_t cri_index = 0; 4452 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4453 dma_addr_t paddr; 4454 4455 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, 4456 GFP_ATOMIC, &paddr); 4457 if (!io_task->cmd_bhs) 4458 return -ENOMEM; 4459 io_task->bhs_pa.u.a64.address = paddr; 4460 io_task->libiscsi_itt = (itt_t)task->itt; 4461 io_task->conn = beiscsi_conn; 4462 4463 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4464 task->hdr_max = sizeof(struct be_cmd_bhs); 4465 io_task->psgl_handle = NULL; 4466 io_task->pwrb_handle = NULL; 4467 4468 if (task->sc) { 4469 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4470 if (!io_task->psgl_handle) { 4471 beiscsi_log(phba, KERN_ERR, 4472 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4473 "BM_%d : Alloc of IO_SGL_ICD Failed" 4474 "for the CID : %d\n", 4475 beiscsi_conn->beiscsi_conn_cid); 4476 goto free_hndls; 4477 } 4478 io_task->pwrb_handle = alloc_wrb_handle(phba, 4479 beiscsi_conn->beiscsi_conn_cid, 4480 &io_task->pwrb_context); 4481 if (!io_task->pwrb_handle) { 4482 beiscsi_log(phba, KERN_ERR, 4483 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4484 "BM_%d : Alloc of WRB_HANDLE Failed" 4485 "for the CID : %d\n", 4486 beiscsi_conn->beiscsi_conn_cid); 4487 goto free_io_hndls; 4488 } 4489 } else { 4490 io_task->scsi_cmnd = NULL; 4491 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4492 beiscsi_conn->task = task; 4493 if (!beiscsi_conn->login_in_progress) { 4494 io_task->psgl_handle = (struct sgl_handle *) 4495 alloc_mgmt_sgl_handle(phba); 4496 if (!io_task->psgl_handle) { 4497 beiscsi_log(phba, KERN_ERR, 4498 BEISCSI_LOG_IO | 4499 BEISCSI_LOG_CONFIG, 4500 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4501 "for the CID : %d\n", 4502 beiscsi_conn-> 4503 beiscsi_conn_cid); 4504 goto free_hndls; 4505 } 4506 4507 beiscsi_conn->login_in_progress = 1; 4508 beiscsi_conn->plogin_sgl_handle = 4509 io_task->psgl_handle; 4510 io_task->pwrb_handle = 4511 alloc_wrb_handle(phba, 4512 beiscsi_conn->beiscsi_conn_cid, 4513 &io_task->pwrb_context); 4514 if (!io_task->pwrb_handle) { 4515 beiscsi_log(phba, KERN_ERR, 4516 BEISCSI_LOG_IO | 4517 BEISCSI_LOG_CONFIG, 4518 "BM_%d : Alloc of WRB_HANDLE Failed" 4519 "for the CID : %d\n", 4520 beiscsi_conn-> 4521 beiscsi_conn_cid); 4522 goto free_mgmt_hndls; 4523 } 4524 beiscsi_conn->plogin_wrb_handle = 4525 io_task->pwrb_handle; 4526 4527 } else { 4528 io_task->psgl_handle = 4529 beiscsi_conn->plogin_sgl_handle; 4530 io_task->pwrb_handle = 4531 beiscsi_conn->plogin_wrb_handle; 4532 } 4533 } else { 4534 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4535 if (!io_task->psgl_handle) { 4536 beiscsi_log(phba, KERN_ERR, 4537 BEISCSI_LOG_IO | 4538 BEISCSI_LOG_CONFIG, 4539 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4540 "for the CID : %d\n", 4541 beiscsi_conn-> 4542 beiscsi_conn_cid); 4543 goto free_hndls; 4544 } 4545 io_task->pwrb_handle = 4546 alloc_wrb_handle(phba, 4547 beiscsi_conn->beiscsi_conn_cid, 4548 &io_task->pwrb_context); 4549 if (!io_task->pwrb_handle) { 4550 beiscsi_log(phba, KERN_ERR, 4551 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4552 "BM_%d : Alloc of WRB_HANDLE Failed" 4553 "for the CID : %d\n", 4554 beiscsi_conn->beiscsi_conn_cid); 4555 goto free_mgmt_hndls; 4556 } 4557 4558 } 4559 } 4560 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4561 wrb_index << 16) | (unsigned int) 4562 (io_task->psgl_handle->sgl_index)); 4563 io_task->pwrb_handle->pio_handle = task; 4564 4565 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4566 return 0; 4567 4568 free_io_hndls: 4569 free_io_sgl_handle(phba, io_task->psgl_handle); 4570 goto free_hndls; 4571 free_mgmt_hndls: 4572 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4573 io_task->psgl_handle = NULL; 4574 free_hndls: 4575 phwi_ctrlr = phba->phwi_ctrlr; 4576 cri_index = BE_GET_CRI_FROM_CID( 4577 beiscsi_conn->beiscsi_conn_cid); 4578 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4579 if (io_task->pwrb_handle) 4580 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4581 io_task->pwrb_handle = NULL; 4582 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4583 io_task->bhs_pa.u.a64.address); 4584 io_task->cmd_bhs = NULL; 4585 return -ENOMEM; 4586 } 4587 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4588 unsigned int num_sg, unsigned int xferlen, 4589 unsigned int writedir) 4590 { 4591 4592 struct beiscsi_io_task *io_task = task->dd_data; 4593 struct iscsi_conn *conn = task->conn; 4594 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4595 struct beiscsi_hba *phba = beiscsi_conn->phba; 4596 struct iscsi_wrb *pwrb = NULL; 4597 unsigned int doorbell = 0; 4598 4599 pwrb = io_task->pwrb_handle->pwrb; 4600 4601 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4602 4603 if (writedir) { 4604 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4605 INI_WR_CMD); 4606 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4607 } else { 4608 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4609 INI_RD_CMD); 4610 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4611 } 4612 4613 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4614 type, pwrb); 4615 4616 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4617 cpu_to_be16(*(unsigned short *) 4618 &io_task->cmd_bhs->iscsi_hdr.lun)); 4619 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4620 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4621 io_task->pwrb_handle->wrb_index); 4622 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4623 be32_to_cpu(task->cmdsn)); 4624 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4625 io_task->psgl_handle->sgl_index); 4626 4627 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4628 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4629 io_task->pwrb_handle->wrb_index); 4630 if (io_task->pwrb_context->plast_wrb) 4631 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4632 io_task->pwrb_context->plast_wrb, 4633 io_task->pwrb_handle->wrb_index); 4634 io_task->pwrb_context->plast_wrb = pwrb; 4635 4636 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4637 4638 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4639 doorbell |= (io_task->pwrb_handle->wrb_index & 4640 DB_DEF_PDU_WRB_INDEX_MASK) << 4641 DB_DEF_PDU_WRB_INDEX_SHIFT; 4642 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4643 iowrite32(doorbell, phba->db_va + 4644 beiscsi_conn->doorbell_offset); 4645 return 0; 4646 } 4647 4648 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4649 unsigned int num_sg, unsigned int xferlen, 4650 unsigned int writedir) 4651 { 4652 4653 struct beiscsi_io_task *io_task = task->dd_data; 4654 struct iscsi_conn *conn = task->conn; 4655 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4656 struct beiscsi_hba *phba = beiscsi_conn->phba; 4657 struct iscsi_wrb *pwrb = NULL; 4658 unsigned int doorbell = 0; 4659 4660 pwrb = io_task->pwrb_handle->pwrb; 4661 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4662 4663 if (writedir) { 4664 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4665 INI_WR_CMD); 4666 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4667 } else { 4668 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4669 INI_RD_CMD); 4670 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4671 } 4672 4673 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4674 type, pwrb); 4675 4676 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4677 cpu_to_be16(*(unsigned short *) 4678 &io_task->cmd_bhs->iscsi_hdr.lun)); 4679 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4680 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4681 io_task->pwrb_handle->wrb_index); 4682 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4683 be32_to_cpu(task->cmdsn)); 4684 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4685 io_task->psgl_handle->sgl_index); 4686 4687 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4688 4689 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4690 io_task->pwrb_handle->wrb_index); 4691 if (io_task->pwrb_context->plast_wrb) 4692 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4693 io_task->pwrb_context->plast_wrb, 4694 io_task->pwrb_handle->wrb_index); 4695 io_task->pwrb_context->plast_wrb = pwrb; 4696 4697 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4698 4699 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4700 doorbell |= (io_task->pwrb_handle->wrb_index & 4701 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4702 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4703 4704 iowrite32(doorbell, phba->db_va + 4705 beiscsi_conn->doorbell_offset); 4706 return 0; 4707 } 4708 4709 static int beiscsi_mtask(struct iscsi_task *task) 4710 { 4711 struct beiscsi_io_task *io_task = task->dd_data; 4712 struct iscsi_conn *conn = task->conn; 4713 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4714 struct beiscsi_hba *phba = beiscsi_conn->phba; 4715 struct iscsi_wrb *pwrb = NULL; 4716 unsigned int doorbell = 0; 4717 unsigned int cid; 4718 unsigned int pwrb_typeoffset = 0; 4719 int ret = 0; 4720 4721 cid = beiscsi_conn->beiscsi_conn_cid; 4722 pwrb = io_task->pwrb_handle->pwrb; 4723 4724 if (is_chip_be2_be3r(phba)) { 4725 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4726 be32_to_cpu(task->cmdsn)); 4727 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4728 io_task->pwrb_handle->wrb_index); 4729 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4730 io_task->psgl_handle->sgl_index); 4731 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4732 task->data_count); 4733 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4734 io_task->pwrb_handle->wrb_index); 4735 if (io_task->pwrb_context->plast_wrb) 4736 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4737 io_task->pwrb_context->plast_wrb, 4738 io_task->pwrb_handle->wrb_index); 4739 io_task->pwrb_context->plast_wrb = pwrb; 4740 4741 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4742 } else { 4743 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4744 be32_to_cpu(task->cmdsn)); 4745 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4746 io_task->pwrb_handle->wrb_index); 4747 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4748 io_task->psgl_handle->sgl_index); 4749 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 4750 task->data_count); 4751 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4752 io_task->pwrb_handle->wrb_index); 4753 if (io_task->pwrb_context->plast_wrb) 4754 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4755 io_task->pwrb_context->plast_wrb, 4756 io_task->pwrb_handle->wrb_index); 4757 io_task->pwrb_context->plast_wrb = pwrb; 4758 4759 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 4760 } 4761 4762 4763 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 4764 case ISCSI_OP_LOGIN: 4765 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 4766 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4767 ret = hwi_write_buffer(pwrb, task); 4768 break; 4769 case ISCSI_OP_NOOP_OUT: 4770 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4771 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4772 if (is_chip_be2_be3r(phba)) 4773 AMAP_SET_BITS(struct amap_iscsi_wrb, 4774 dmsg, pwrb, 1); 4775 else 4776 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4777 dmsg, pwrb, 1); 4778 } else { 4779 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4780 if (is_chip_be2_be3r(phba)) 4781 AMAP_SET_BITS(struct amap_iscsi_wrb, 4782 dmsg, pwrb, 0); 4783 else 4784 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4785 dmsg, pwrb, 0); 4786 } 4787 ret = hwi_write_buffer(pwrb, task); 4788 break; 4789 case ISCSI_OP_TEXT: 4790 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4791 ret = hwi_write_buffer(pwrb, task); 4792 break; 4793 case ISCSI_OP_SCSI_TMFUNC: 4794 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 4795 ret = hwi_write_buffer(pwrb, task); 4796 break; 4797 case ISCSI_OP_LOGOUT: 4798 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 4799 ret = hwi_write_buffer(pwrb, task); 4800 break; 4801 4802 default: 4803 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4804 "BM_%d : opcode =%d Not supported\n", 4805 task->hdr->opcode & ISCSI_OPCODE_MASK); 4806 4807 return -EINVAL; 4808 } 4809 4810 if (ret) 4811 return ret; 4812 4813 /* Set the task type */ 4814 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 4815 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 4816 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 4817 4818 doorbell |= cid & DB_WRB_POST_CID_MASK; 4819 doorbell |= (io_task->pwrb_handle->wrb_index & 4820 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4821 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4822 iowrite32(doorbell, phba->db_va + 4823 beiscsi_conn->doorbell_offset); 4824 return 0; 4825 } 4826 4827 static int beiscsi_task_xmit(struct iscsi_task *task) 4828 { 4829 struct beiscsi_io_task *io_task = task->dd_data; 4830 struct scsi_cmnd *sc = task->sc; 4831 struct beiscsi_hba *phba; 4832 struct scatterlist *sg; 4833 int num_sg; 4834 unsigned int writedir = 0, xferlen = 0; 4835 4836 phba = io_task->conn->phba; 4837 /** 4838 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be 4839 * operational if FW still gets heartbeat from EP FW. Is management 4840 * path really needed to continue further? 4841 */ 4842 if (!beiscsi_hba_is_online(phba)) 4843 return -EIO; 4844 4845 if (!io_task->conn->login_in_progress) 4846 task->hdr->exp_statsn = 0; 4847 4848 if (!sc) 4849 return beiscsi_mtask(task); 4850 4851 io_task->scsi_cmnd = sc; 4852 io_task->num_sg = 0; 4853 num_sg = scsi_dma_map(sc); 4854 if (num_sg < 0) { 4855 beiscsi_log(phba, KERN_ERR, 4856 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 4857 "BM_%d : scsi_dma_map Failed " 4858 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 4859 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 4860 io_task->libiscsi_itt, scsi_bufflen(sc)); 4861 4862 return num_sg; 4863 } 4864 /** 4865 * For scsi cmd task, check num_sg before unmapping in cleanup_task. 4866 * For management task, cleanup_task checks mtask_addr before unmapping. 4867 */ 4868 io_task->num_sg = num_sg; 4869 xferlen = scsi_bufflen(sc); 4870 sg = scsi_sglist(sc); 4871 if (sc->sc_data_direction == DMA_TO_DEVICE) 4872 writedir = 1; 4873 else 4874 writedir = 0; 4875 4876 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 4877 } 4878 4879 /** 4880 * beiscsi_bsg_request - handle bsg request from ISCSI transport 4881 * @job: job to handle 4882 */ 4883 static int beiscsi_bsg_request(struct bsg_job *job) 4884 { 4885 struct Scsi_Host *shost; 4886 struct beiscsi_hba *phba; 4887 struct iscsi_bsg_request *bsg_req = job->request; 4888 int rc = -EINVAL; 4889 unsigned int tag; 4890 struct be_dma_mem nonemb_cmd; 4891 struct be_cmd_resp_hdr *resp; 4892 struct iscsi_bsg_reply *bsg_reply = job->reply; 4893 unsigned short status, extd_status; 4894 4895 shost = iscsi_job_to_shost(job); 4896 phba = iscsi_host_priv(shost); 4897 4898 if (!beiscsi_hba_is_online(phba)) { 4899 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 4900 "BM_%d : HBA in error 0x%lx\n", phba->state); 4901 return -ENXIO; 4902 } 4903 4904 switch (bsg_req->msgcode) { 4905 case ISCSI_BSG_HST_VENDOR: 4906 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 4907 job->request_payload.payload_len, 4908 &nonemb_cmd.dma); 4909 if (nonemb_cmd.va == NULL) { 4910 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4911 "BM_%d : Failed to allocate memory for " 4912 "beiscsi_bsg_request\n"); 4913 return -ENOMEM; 4914 } 4915 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4916 &nonemb_cmd); 4917 if (!tag) { 4918 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4919 "BM_%d : MBX Tag Allocation Failed\n"); 4920 4921 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4922 nonemb_cmd.va, nonemb_cmd.dma); 4923 return -EAGAIN; 4924 } 4925 4926 rc = wait_event_interruptible_timeout( 4927 phba->ctrl.mcc_wait[tag], 4928 phba->ctrl.mcc_tag_status[tag], 4929 msecs_to_jiffies( 4930 BEISCSI_HOST_MBX_TIMEOUT)); 4931 4932 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 4933 clear_bit(MCC_TAG_STATE_RUNNING, 4934 &phba->ctrl.ptag_state[tag].tag_state); 4935 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4936 nonemb_cmd.va, nonemb_cmd.dma); 4937 return -EIO; 4938 } 4939 extd_status = (phba->ctrl.mcc_tag_status[tag] & 4940 CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; 4941 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; 4942 free_mcc_wrb(&phba->ctrl, tag); 4943 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 4944 sg_copy_from_buffer(job->reply_payload.sg_list, 4945 job->reply_payload.sg_cnt, 4946 nonemb_cmd.va, (resp->response_length 4947 + sizeof(*resp))); 4948 bsg_reply->reply_payload_rcv_len = resp->response_length; 4949 bsg_reply->result = status; 4950 bsg_job_done(job, bsg_reply->result, 4951 bsg_reply->reply_payload_rcv_len); 4952 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4953 nonemb_cmd.va, nonemb_cmd.dma); 4954 if (status || extd_status) { 4955 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4956 "BM_%d : MBX Cmd Failed" 4957 " status = %d extd_status = %d\n", 4958 status, extd_status); 4959 4960 return -EIO; 4961 } else { 4962 rc = 0; 4963 } 4964 break; 4965 4966 default: 4967 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4968 "BM_%d : Unsupported bsg command: 0x%x\n", 4969 bsg_req->msgcode); 4970 break; 4971 } 4972 4973 return rc; 4974 } 4975 4976 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 4977 { 4978 /* Set the logging parameter */ 4979 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4980 } 4981 4982 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle) 4983 { 4984 if (phba->boot_struct.boot_kset) 4985 return; 4986 4987 /* skip if boot work is already in progress */ 4988 if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) 4989 return; 4990 4991 phba->boot_struct.retry = 3; 4992 phba->boot_struct.tag = 0; 4993 phba->boot_struct.s_handle = s_handle; 4994 phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE; 4995 schedule_work(&phba->boot_work); 4996 } 4997 4998 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 4999 { 5000 struct beiscsi_hba *phba = data; 5001 struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess; 5002 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 5003 char *str = buf; 5004 int rc = -EPERM; 5005 5006 switch (type) { 5007 case ISCSI_BOOT_TGT_NAME: 5008 rc = sprintf(buf, "%.*s\n", 5009 (int)strlen(boot_sess->target_name), 5010 (char *)&boot_sess->target_name); 5011 break; 5012 case ISCSI_BOOT_TGT_IP_ADDR: 5013 if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4) 5014 rc = sprintf(buf, "%pI4\n", 5015 (char *)&boot_conn->dest_ipaddr.addr); 5016 else 5017 rc = sprintf(str, "%pI6\n", 5018 (char *)&boot_conn->dest_ipaddr.addr); 5019 break; 5020 case ISCSI_BOOT_TGT_PORT: 5021 rc = sprintf(str, "%d\n", boot_conn->dest_port); 5022 break; 5023 5024 case ISCSI_BOOT_TGT_CHAP_NAME: 5025 rc = sprintf(str, "%.*s\n", 5026 boot_conn->negotiated_login_options.auth_data.chap. 5027 target_chap_name_length, 5028 (char *)&boot_conn->negotiated_login_options. 5029 auth_data.chap.target_chap_name); 5030 break; 5031 case ISCSI_BOOT_TGT_CHAP_SECRET: 5032 rc = sprintf(str, "%.*s\n", 5033 boot_conn->negotiated_login_options.auth_data.chap. 5034 target_secret_length, 5035 (char *)&boot_conn->negotiated_login_options. 5036 auth_data.chap.target_secret); 5037 break; 5038 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5039 rc = sprintf(str, "%.*s\n", 5040 boot_conn->negotiated_login_options.auth_data.chap. 5041 intr_chap_name_length, 5042 (char *)&boot_conn->negotiated_login_options. 5043 auth_data.chap.intr_chap_name); 5044 break; 5045 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5046 rc = sprintf(str, "%.*s\n", 5047 boot_conn->negotiated_login_options.auth_data.chap. 5048 intr_secret_length, 5049 (char *)&boot_conn->negotiated_login_options. 5050 auth_data.chap.intr_secret); 5051 break; 5052 case ISCSI_BOOT_TGT_FLAGS: 5053 rc = sprintf(str, "2\n"); 5054 break; 5055 case ISCSI_BOOT_TGT_NIC_ASSOC: 5056 rc = sprintf(str, "0\n"); 5057 break; 5058 } 5059 return rc; 5060 } 5061 5062 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 5063 { 5064 struct beiscsi_hba *phba = data; 5065 char *str = buf; 5066 int rc = -EPERM; 5067 5068 switch (type) { 5069 case ISCSI_BOOT_INI_INITIATOR_NAME: 5070 rc = sprintf(str, "%s\n", 5071 phba->boot_struct.boot_sess.initiator_iscsiname); 5072 break; 5073 } 5074 return rc; 5075 } 5076 5077 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 5078 { 5079 struct beiscsi_hba *phba = data; 5080 char *str = buf; 5081 int rc = -EPERM; 5082 5083 switch (type) { 5084 case ISCSI_BOOT_ETH_FLAGS: 5085 rc = sprintf(str, "2\n"); 5086 break; 5087 case ISCSI_BOOT_ETH_INDEX: 5088 rc = sprintf(str, "0\n"); 5089 break; 5090 case ISCSI_BOOT_ETH_MAC: 5091 rc = beiscsi_get_macaddr(str, phba); 5092 break; 5093 } 5094 return rc; 5095 } 5096 5097 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 5098 { 5099 umode_t rc = 0; 5100 5101 switch (type) { 5102 case ISCSI_BOOT_TGT_NAME: 5103 case ISCSI_BOOT_TGT_IP_ADDR: 5104 case ISCSI_BOOT_TGT_PORT: 5105 case ISCSI_BOOT_TGT_CHAP_NAME: 5106 case ISCSI_BOOT_TGT_CHAP_SECRET: 5107 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5108 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5109 case ISCSI_BOOT_TGT_NIC_ASSOC: 5110 case ISCSI_BOOT_TGT_FLAGS: 5111 rc = S_IRUGO; 5112 break; 5113 } 5114 return rc; 5115 } 5116 5117 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 5118 { 5119 umode_t rc = 0; 5120 5121 switch (type) { 5122 case ISCSI_BOOT_INI_INITIATOR_NAME: 5123 rc = S_IRUGO; 5124 break; 5125 } 5126 return rc; 5127 } 5128 5129 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 5130 { 5131 umode_t rc = 0; 5132 5133 switch (type) { 5134 case ISCSI_BOOT_ETH_FLAGS: 5135 case ISCSI_BOOT_ETH_MAC: 5136 case ISCSI_BOOT_ETH_INDEX: 5137 rc = S_IRUGO; 5138 break; 5139 } 5140 return rc; 5141 } 5142 5143 static void beiscsi_boot_kobj_release(void *data) 5144 { 5145 struct beiscsi_hba *phba = data; 5146 5147 scsi_host_put(phba->shost); 5148 } 5149 5150 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba) 5151 { 5152 struct boot_struct *bs = &phba->boot_struct; 5153 struct iscsi_boot_kobj *boot_kobj; 5154 5155 if (bs->boot_kset) { 5156 __beiscsi_log(phba, KERN_ERR, 5157 "BM_%d: boot_kset already created\n"); 5158 return 0; 5159 } 5160 5161 bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 5162 if (!bs->boot_kset) { 5163 __beiscsi_log(phba, KERN_ERR, 5164 "BM_%d: boot_kset alloc failed\n"); 5165 return -ENOMEM; 5166 } 5167 5168 /* get shost ref because the show function will refer phba */ 5169 if (!scsi_host_get(phba->shost)) 5170 goto free_kset; 5171 5172 boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba, 5173 beiscsi_show_boot_tgt_info, 5174 beiscsi_tgt_get_attr_visibility, 5175 beiscsi_boot_kobj_release); 5176 if (!boot_kobj) 5177 goto put_shost; 5178 5179 if (!scsi_host_get(phba->shost)) 5180 goto free_kset; 5181 5182 boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba, 5183 beiscsi_show_boot_ini_info, 5184 beiscsi_ini_get_attr_visibility, 5185 beiscsi_boot_kobj_release); 5186 if (!boot_kobj) 5187 goto put_shost; 5188 5189 if (!scsi_host_get(phba->shost)) 5190 goto free_kset; 5191 5192 boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba, 5193 beiscsi_show_boot_eth_info, 5194 beiscsi_eth_get_attr_visibility, 5195 beiscsi_boot_kobj_release); 5196 if (!boot_kobj) 5197 goto put_shost; 5198 5199 return 0; 5200 5201 put_shost: 5202 scsi_host_put(phba->shost); 5203 free_kset: 5204 iscsi_boot_destroy_kset(bs->boot_kset); 5205 bs->boot_kset = NULL; 5206 return -ENOMEM; 5207 } 5208 5209 static void beiscsi_boot_work(struct work_struct *work) 5210 { 5211 struct beiscsi_hba *phba = 5212 container_of(work, struct beiscsi_hba, boot_work); 5213 struct boot_struct *bs = &phba->boot_struct; 5214 unsigned int tag = 0; 5215 5216 if (!beiscsi_hba_is_online(phba)) 5217 return; 5218 5219 beiscsi_log(phba, KERN_INFO, 5220 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 5221 "BM_%d : %s action %d\n", 5222 __func__, phba->boot_struct.action); 5223 5224 switch (phba->boot_struct.action) { 5225 case BEISCSI_BOOT_REOPEN_SESS: 5226 tag = beiscsi_boot_reopen_sess(phba); 5227 break; 5228 case BEISCSI_BOOT_GET_SHANDLE: 5229 tag = __beiscsi_boot_get_shandle(phba, 1); 5230 break; 5231 case BEISCSI_BOOT_GET_SINFO: 5232 tag = beiscsi_boot_get_sinfo(phba); 5233 break; 5234 case BEISCSI_BOOT_LOGOUT_SESS: 5235 tag = beiscsi_boot_logout_sess(phba); 5236 break; 5237 case BEISCSI_BOOT_CREATE_KSET: 5238 beiscsi_boot_create_kset(phba); 5239 /** 5240 * updated boot_kset is made visible to all before 5241 * ending the boot work. 5242 */ 5243 mb(); 5244 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5245 return; 5246 } 5247 if (!tag) { 5248 if (bs->retry--) 5249 schedule_work(&phba->boot_work); 5250 else 5251 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5252 } 5253 } 5254 5255 static void beiscsi_eqd_update_work(struct work_struct *work) 5256 { 5257 struct hwi_context_memory *phwi_context; 5258 struct be_set_eqd set_eqd[MAX_CPUS]; 5259 struct hwi_controller *phwi_ctrlr; 5260 struct be_eq_obj *pbe_eq; 5261 struct beiscsi_hba *phba; 5262 unsigned int pps, delta; 5263 struct be_aic_obj *aic; 5264 int eqd, i, num = 0; 5265 unsigned long now; 5266 5267 phba = container_of(work, struct beiscsi_hba, eqd_update.work); 5268 if (!beiscsi_hba_is_online(phba)) 5269 return; 5270 5271 phwi_ctrlr = phba->phwi_ctrlr; 5272 phwi_context = phwi_ctrlr->phwi_ctxt; 5273 5274 for (i = 0; i <= phba->num_cpus; i++) { 5275 aic = &phba->aic_obj[i]; 5276 pbe_eq = &phwi_context->be_eq[i]; 5277 now = jiffies; 5278 if (!aic->jiffies || time_before(now, aic->jiffies) || 5279 pbe_eq->cq_count < aic->eq_prev) { 5280 aic->jiffies = now; 5281 aic->eq_prev = pbe_eq->cq_count; 5282 continue; 5283 } 5284 delta = jiffies_to_msecs(now - aic->jiffies); 5285 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5286 eqd = (pps / 1500) << 2; 5287 5288 if (eqd < 8) 5289 eqd = 0; 5290 eqd = min_t(u32, eqd, phwi_context->max_eqd); 5291 eqd = max_t(u32, eqd, phwi_context->min_eqd); 5292 5293 aic->jiffies = now; 5294 aic->eq_prev = pbe_eq->cq_count; 5295 5296 if (eqd != aic->prev_eqd) { 5297 set_eqd[num].delay_multiplier = (eqd * 65)/100; 5298 set_eqd[num].eq_id = pbe_eq->q.id; 5299 aic->prev_eqd = eqd; 5300 num++; 5301 } 5302 } 5303 if (num) 5304 /* completion of this is ignored */ 5305 beiscsi_modify_eq_delay(phba, set_eqd, num); 5306 5307 schedule_delayed_work(&phba->eqd_update, 5308 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5309 } 5310 5311 static void beiscsi_msix_enable(struct beiscsi_hba *phba) 5312 { 5313 int i, status; 5314 5315 for (i = 0; i <= phba->num_cpus; i++) 5316 phba->msix_entries[i].entry = i; 5317 5318 status = pci_enable_msix_range(phba->pcidev, phba->msix_entries, 5319 phba->num_cpus + 1, phba->num_cpus + 1); 5320 if (status > 0) 5321 phba->msix_enabled = true; 5322 } 5323 5324 static void beiscsi_hw_tpe_check(unsigned long ptr) 5325 { 5326 struct beiscsi_hba *phba; 5327 u32 wait; 5328 5329 phba = (struct beiscsi_hba *)ptr; 5330 /* if not TPE, do nothing */ 5331 if (!beiscsi_detect_tpe(phba)) 5332 return; 5333 5334 /* wait default 4000ms before recovering */ 5335 wait = 4000; 5336 if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL) 5337 wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL; 5338 queue_delayed_work(phba->wq, &phba->recover_port, 5339 msecs_to_jiffies(wait)); 5340 } 5341 5342 static void beiscsi_hw_health_check(unsigned long ptr) 5343 { 5344 struct beiscsi_hba *phba; 5345 5346 phba = (struct beiscsi_hba *)ptr; 5347 beiscsi_detect_ue(phba); 5348 if (beiscsi_detect_ue(phba)) { 5349 __beiscsi_log(phba, KERN_ERR, 5350 "BM_%d : port in error: %lx\n", phba->state); 5351 /* sessions are no longer valid, so first fail the sessions */ 5352 queue_work(phba->wq, &phba->sess_work); 5353 5354 /* detect UER supported */ 5355 if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state)) 5356 return; 5357 /* modify this timer to check TPE */ 5358 phba->hw_check.function = beiscsi_hw_tpe_check; 5359 } 5360 5361 mod_timer(&phba->hw_check, 5362 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5363 } 5364 5365 /* 5366 * beiscsi_enable_port()- Enables the disabled port. 5367 * Only port resources freed in disable function are reallocated. 5368 * This is called in HBA error handling path. 5369 * 5370 * @phba: Instance of driver private structure 5371 * 5372 **/ 5373 static int beiscsi_enable_port(struct beiscsi_hba *phba) 5374 { 5375 struct hwi_context_memory *phwi_context; 5376 struct hwi_controller *phwi_ctrlr; 5377 struct be_eq_obj *pbe_eq; 5378 int ret, i; 5379 5380 if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 5381 __beiscsi_log(phba, KERN_ERR, 5382 "BM_%d : %s : port is online %lx\n", 5383 __func__, phba->state); 5384 return 0; 5385 } 5386 5387 ret = beiscsi_init_sliport(phba); 5388 if (ret) 5389 return ret; 5390 5391 if (enable_msix) 5392 find_num_cpus(phba); 5393 else 5394 phba->num_cpus = 1; 5395 if (enable_msix) { 5396 beiscsi_msix_enable(phba); 5397 if (!phba->msix_enabled) 5398 phba->num_cpus = 1; 5399 } 5400 5401 beiscsi_get_params(phba); 5402 /* Re-enable UER. If different TPE occurs then it is recoverable. */ 5403 beiscsi_set_uer_feature(phba); 5404 5405 phba->shost->max_id = phba->params.cxns_per_ctrl; 5406 phba->shost->can_queue = phba->params.ios_per_ctrl; 5407 ret = hwi_init_controller(phba); 5408 if (ret) { 5409 __beiscsi_log(phba, KERN_ERR, 5410 "BM_%d : init controller failed %d\n", ret); 5411 goto disable_msix; 5412 } 5413 5414 for (i = 0; i < MAX_MCC_CMD; i++) { 5415 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5416 phba->ctrl.mcc_tag[i] = i + 1; 5417 phba->ctrl.mcc_tag_status[i + 1] = 0; 5418 phba->ctrl.mcc_tag_available++; 5419 } 5420 5421 phwi_ctrlr = phba->phwi_ctrlr; 5422 phwi_context = phwi_ctrlr->phwi_ctxt; 5423 for (i = 0; i < phba->num_cpus; i++) { 5424 pbe_eq = &phwi_context->be_eq[i]; 5425 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); 5426 } 5427 5428 i = (phba->msix_enabled) ? i : 0; 5429 /* Work item for MCC handling */ 5430 pbe_eq = &phwi_context->be_eq[i]; 5431 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); 5432 5433 ret = beiscsi_init_irqs(phba); 5434 if (ret < 0) { 5435 __beiscsi_log(phba, KERN_ERR, 5436 "BM_%d : setup IRQs failed %d\n", ret); 5437 goto cleanup_port; 5438 } 5439 hwi_enable_intr(phba); 5440 /* port operational: clear all error bits */ 5441 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5442 __beiscsi_log(phba, KERN_INFO, 5443 "BM_%d : port online: 0x%lx\n", phba->state); 5444 5445 /* start hw_check timer and eqd_update work */ 5446 schedule_delayed_work(&phba->eqd_update, 5447 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5448 5449 /** 5450 * Timer function gets modified for TPE detection. 5451 * Always reinit to do health check first. 5452 */ 5453 phba->hw_check.function = beiscsi_hw_health_check; 5454 mod_timer(&phba->hw_check, 5455 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5456 return 0; 5457 5458 cleanup_port: 5459 for (i = 0; i < phba->num_cpus; i++) { 5460 pbe_eq = &phwi_context->be_eq[i]; 5461 irq_poll_disable(&pbe_eq->iopoll); 5462 } 5463 hwi_cleanup_port(phba); 5464 5465 disable_msix: 5466 if (phba->msix_enabled) 5467 pci_disable_msix(phba->pcidev); 5468 5469 return ret; 5470 } 5471 5472 /* 5473 * beiscsi_disable_port()- Disable port and cleanup driver resources. 5474 * This is called in HBA error handling and driver removal. 5475 * @phba: Instance Priv structure 5476 * @unload: indicate driver is unloading 5477 * 5478 * Free the OS and HW resources held by the driver 5479 **/ 5480 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload) 5481 { 5482 struct hwi_context_memory *phwi_context; 5483 struct hwi_controller *phwi_ctrlr; 5484 struct be_eq_obj *pbe_eq; 5485 unsigned int i, msix_vec; 5486 5487 if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state)) 5488 return; 5489 5490 phwi_ctrlr = phba->phwi_ctrlr; 5491 phwi_context = phwi_ctrlr->phwi_ctxt; 5492 hwi_disable_intr(phba); 5493 if (phba->msix_enabled) { 5494 for (i = 0; i <= phba->num_cpus; i++) { 5495 msix_vec = phba->msix_entries[i].vector; 5496 free_irq(msix_vec, &phwi_context->be_eq[i]); 5497 kfree(phba->msi_name[i]); 5498 } 5499 } else 5500 if (phba->pcidev->irq) 5501 free_irq(phba->pcidev->irq, phba); 5502 pci_disable_msix(phba->pcidev); 5503 5504 for (i = 0; i < phba->num_cpus; i++) { 5505 pbe_eq = &phwi_context->be_eq[i]; 5506 irq_poll_disable(&pbe_eq->iopoll); 5507 } 5508 cancel_delayed_work_sync(&phba->eqd_update); 5509 cancel_work_sync(&phba->boot_work); 5510 /* WQ might be running cancel queued mcc_work if we are not exiting */ 5511 if (!unload && beiscsi_hba_in_error(phba)) { 5512 pbe_eq = &phwi_context->be_eq[i]; 5513 cancel_work_sync(&pbe_eq->mcc_work); 5514 } 5515 hwi_cleanup_port(phba); 5516 } 5517 5518 static void beiscsi_sess_work(struct work_struct *work) 5519 { 5520 struct beiscsi_hba *phba; 5521 5522 phba = container_of(work, struct beiscsi_hba, sess_work); 5523 /* 5524 * This work gets scheduled only in case of HBA error. 5525 * Old sessions are gone so need to be re-established. 5526 * iscsi_session_failure needs process context hence this work. 5527 */ 5528 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5529 } 5530 5531 static void beiscsi_recover_port(struct work_struct *work) 5532 { 5533 struct beiscsi_hba *phba; 5534 5535 phba = container_of(work, struct beiscsi_hba, recover_port.work); 5536 beiscsi_disable_port(phba, 0); 5537 beiscsi_enable_port(phba); 5538 } 5539 5540 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 5541 pci_channel_state_t state) 5542 { 5543 struct beiscsi_hba *phba = NULL; 5544 5545 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5546 set_bit(BEISCSI_HBA_PCI_ERR, &phba->state); 5547 5548 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5549 "BM_%d : EEH error detected\n"); 5550 5551 /* first stop UE detection when PCI error detected */ 5552 del_timer_sync(&phba->hw_check); 5553 cancel_delayed_work_sync(&phba->recover_port); 5554 5555 /* sessions are no longer valid, so first fail the sessions */ 5556 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5557 beiscsi_disable_port(phba, 0); 5558 5559 if (state == pci_channel_io_perm_failure) { 5560 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5561 "BM_%d : EEH : State PERM Failure"); 5562 return PCI_ERS_RESULT_DISCONNECT; 5563 } 5564 5565 pci_disable_device(pdev); 5566 5567 /* The error could cause the FW to trigger a flash debug dump. 5568 * Resetting the card while flash dump is in progress 5569 * can cause it not to recover; wait for it to finish. 5570 * Wait only for first function as it is needed only once per 5571 * adapter. 5572 **/ 5573 if (pdev->devfn == 0) 5574 ssleep(30); 5575 5576 return PCI_ERS_RESULT_NEED_RESET; 5577 } 5578 5579 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) 5580 { 5581 struct beiscsi_hba *phba = NULL; 5582 int status = 0; 5583 5584 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5585 5586 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5587 "BM_%d : EEH Reset\n"); 5588 5589 status = pci_enable_device(pdev); 5590 if (status) 5591 return PCI_ERS_RESULT_DISCONNECT; 5592 5593 pci_set_master(pdev); 5594 pci_set_power_state(pdev, PCI_D0); 5595 pci_restore_state(pdev); 5596 5597 status = beiscsi_check_fw_rdy(phba); 5598 if (status) { 5599 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5600 "BM_%d : EEH Reset Completed\n"); 5601 } else { 5602 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5603 "BM_%d : EEH Reset Completion Failure\n"); 5604 return PCI_ERS_RESULT_DISCONNECT; 5605 } 5606 5607 pci_cleanup_aer_uncorrect_error_status(pdev); 5608 return PCI_ERS_RESULT_RECOVERED; 5609 } 5610 5611 static void beiscsi_eeh_resume(struct pci_dev *pdev) 5612 { 5613 struct beiscsi_hba *phba; 5614 int ret; 5615 5616 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5617 pci_save_state(pdev); 5618 5619 ret = beiscsi_enable_port(phba); 5620 if (ret) 5621 __beiscsi_log(phba, KERN_ERR, 5622 "BM_%d : AER EEH resume failed\n"); 5623 } 5624 5625 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5626 const struct pci_device_id *id) 5627 { 5628 struct beiscsi_hba *phba = NULL; 5629 struct hwi_controller *phwi_ctrlr; 5630 struct hwi_context_memory *phwi_context; 5631 struct be_eq_obj *pbe_eq; 5632 unsigned int s_handle; 5633 int ret, i; 5634 5635 ret = beiscsi_enable_pci(pcidev); 5636 if (ret < 0) { 5637 dev_err(&pcidev->dev, 5638 "beiscsi_dev_probe - Failed to enable pci device\n"); 5639 return ret; 5640 } 5641 5642 phba = beiscsi_hba_alloc(pcidev); 5643 if (!phba) { 5644 dev_err(&pcidev->dev, 5645 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5646 ret = -ENOMEM; 5647 goto disable_pci; 5648 } 5649 5650 /* Enable EEH reporting */ 5651 ret = pci_enable_pcie_error_reporting(pcidev); 5652 if (ret) 5653 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5654 "BM_%d : PCIe Error Reporting " 5655 "Enabling Failed\n"); 5656 5657 pci_save_state(pcidev); 5658 5659 /* Initialize Driver configuration Paramters */ 5660 beiscsi_hba_attrs_init(phba); 5661 5662 phba->mac_addr_set = false; 5663 5664 switch (pcidev->device) { 5665 case BE_DEVICE_ID1: 5666 case OC_DEVICE_ID1: 5667 case OC_DEVICE_ID2: 5668 phba->generation = BE_GEN2; 5669 phba->iotask_fn = beiscsi_iotask; 5670 break; 5671 case BE_DEVICE_ID2: 5672 case OC_DEVICE_ID3: 5673 phba->generation = BE_GEN3; 5674 phba->iotask_fn = beiscsi_iotask; 5675 break; 5676 case OC_SKH_ID1: 5677 phba->generation = BE_GEN4; 5678 phba->iotask_fn = beiscsi_iotask_v2; 5679 break; 5680 default: 5681 phba->generation = 0; 5682 } 5683 5684 ret = be_ctrl_init(phba, pcidev); 5685 if (ret) { 5686 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5687 "BM_%d : be_ctrl_init failed\n"); 5688 goto hba_free; 5689 } 5690 5691 ret = beiscsi_init_sliport(phba); 5692 if (ret) 5693 goto hba_free; 5694 5695 spin_lock_init(&phba->io_sgl_lock); 5696 spin_lock_init(&phba->mgmt_sgl_lock); 5697 spin_lock_init(&phba->async_pdu_lock); 5698 ret = beiscsi_get_fw_config(&phba->ctrl, phba); 5699 if (ret != 0) { 5700 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5701 "BM_%d : Error getting fw config\n"); 5702 goto free_port; 5703 } 5704 beiscsi_get_port_name(&phba->ctrl, phba); 5705 beiscsi_get_params(phba); 5706 beiscsi_set_uer_feature(phba); 5707 5708 if (enable_msix) 5709 find_num_cpus(phba); 5710 else 5711 phba->num_cpus = 1; 5712 5713 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5714 "BM_%d : num_cpus = %d\n", 5715 phba->num_cpus); 5716 5717 if (enable_msix) { 5718 beiscsi_msix_enable(phba); 5719 if (!phba->msix_enabled) 5720 phba->num_cpus = 1; 5721 } 5722 5723 phba->shost->max_id = phba->params.cxns_per_ctrl; 5724 phba->shost->can_queue = phba->params.ios_per_ctrl; 5725 ret = beiscsi_init_port(phba); 5726 if (ret < 0) { 5727 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5728 "BM_%d : beiscsi_dev_probe-" 5729 "Failed in beiscsi_init_port\n"); 5730 goto free_port; 5731 } 5732 5733 for (i = 0; i < MAX_MCC_CMD; i++) { 5734 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5735 phba->ctrl.mcc_tag[i] = i + 1; 5736 phba->ctrl.mcc_tag_status[i + 1] = 0; 5737 phba->ctrl.mcc_tag_available++; 5738 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, 5739 sizeof(struct be_dma_mem)); 5740 } 5741 5742 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 5743 5744 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", 5745 phba->shost->host_no); 5746 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name); 5747 if (!phba->wq) { 5748 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5749 "BM_%d : beiscsi_dev_probe-" 5750 "Failed to allocate work queue\n"); 5751 ret = -ENOMEM; 5752 goto free_twq; 5753 } 5754 5755 INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work); 5756 5757 phwi_ctrlr = phba->phwi_ctrlr; 5758 phwi_context = phwi_ctrlr->phwi_ctxt; 5759 5760 for (i = 0; i < phba->num_cpus; i++) { 5761 pbe_eq = &phwi_context->be_eq[i]; 5762 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); 5763 } 5764 5765 i = (phba->msix_enabled) ? i : 0; 5766 /* Work item for MCC handling */ 5767 pbe_eq = &phwi_context->be_eq[i]; 5768 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); 5769 5770 ret = beiscsi_init_irqs(phba); 5771 if (ret < 0) { 5772 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5773 "BM_%d : beiscsi_dev_probe-" 5774 "Failed to beiscsi_init_irqs\n"); 5775 goto free_blkenbld; 5776 } 5777 hwi_enable_intr(phba); 5778 5779 ret = iscsi_host_add(phba->shost, &phba->pcidev->dev); 5780 if (ret) 5781 goto free_blkenbld; 5782 5783 /* set online bit after port is operational */ 5784 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5785 __beiscsi_log(phba, KERN_INFO, 5786 "BM_%d : port online: 0x%lx\n", phba->state); 5787 5788 INIT_WORK(&phba->boot_work, beiscsi_boot_work); 5789 ret = beiscsi_boot_get_shandle(phba, &s_handle); 5790 if (ret > 0) { 5791 beiscsi_start_boot_work(phba, s_handle); 5792 /** 5793 * Set this bit after starting the work to let 5794 * probe handle it first. 5795 * ASYNC event can too schedule this work. 5796 */ 5797 set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state); 5798 } 5799 5800 beiscsi_iface_create_default(phba); 5801 schedule_delayed_work(&phba->eqd_update, 5802 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5803 5804 INIT_WORK(&phba->sess_work, beiscsi_sess_work); 5805 INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port); 5806 /** 5807 * Start UE detection here. UE before this will cause stall in probe 5808 * and eventually fail the probe. 5809 */ 5810 init_timer(&phba->hw_check); 5811 phba->hw_check.function = beiscsi_hw_health_check; 5812 phba->hw_check.data = (unsigned long)phba; 5813 mod_timer(&phba->hw_check, 5814 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5815 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5816 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); 5817 return 0; 5818 5819 free_blkenbld: 5820 destroy_workqueue(phba->wq); 5821 for (i = 0; i < phba->num_cpus; i++) { 5822 pbe_eq = &phwi_context->be_eq[i]; 5823 irq_poll_disable(&pbe_eq->iopoll); 5824 } 5825 free_twq: 5826 hwi_cleanup_port(phba); 5827 beiscsi_cleanup_port(phba); 5828 beiscsi_free_mem(phba); 5829 free_port: 5830 pci_free_consistent(phba->pcidev, 5831 phba->ctrl.mbox_mem_alloced.size, 5832 phba->ctrl.mbox_mem_alloced.va, 5833 phba->ctrl.mbox_mem_alloced.dma); 5834 beiscsi_unmap_pci_function(phba); 5835 hba_free: 5836 if (phba->msix_enabled) 5837 pci_disable_msix(phba->pcidev); 5838 pci_dev_put(phba->pcidev); 5839 iscsi_host_free(phba->shost); 5840 pci_set_drvdata(pcidev, NULL); 5841 disable_pci: 5842 pci_release_regions(pcidev); 5843 pci_disable_device(pcidev); 5844 return ret; 5845 } 5846 5847 static void beiscsi_remove(struct pci_dev *pcidev) 5848 { 5849 struct beiscsi_hba *phba = NULL; 5850 5851 phba = pci_get_drvdata(pcidev); 5852 if (!phba) { 5853 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); 5854 return; 5855 } 5856 5857 /* first stop UE detection before unloading */ 5858 del_timer_sync(&phba->hw_check); 5859 cancel_delayed_work_sync(&phba->recover_port); 5860 cancel_work_sync(&phba->sess_work); 5861 5862 beiscsi_iface_destroy_default(phba); 5863 iscsi_host_remove(phba->shost); 5864 beiscsi_disable_port(phba, 1); 5865 5866 /* after cancelling boot_work */ 5867 iscsi_boot_destroy_kset(phba->boot_struct.boot_kset); 5868 5869 /* free all resources */ 5870 destroy_workqueue(phba->wq); 5871 beiscsi_cleanup_port(phba); 5872 beiscsi_free_mem(phba); 5873 5874 /* ctrl uninit */ 5875 beiscsi_unmap_pci_function(phba); 5876 pci_free_consistent(phba->pcidev, 5877 phba->ctrl.mbox_mem_alloced.size, 5878 phba->ctrl.mbox_mem_alloced.va, 5879 phba->ctrl.mbox_mem_alloced.dma); 5880 5881 pci_dev_put(phba->pcidev); 5882 iscsi_host_free(phba->shost); 5883 pci_disable_pcie_error_reporting(pcidev); 5884 pci_set_drvdata(pcidev, NULL); 5885 pci_release_regions(pcidev); 5886 pci_disable_device(pcidev); 5887 } 5888 5889 5890 static struct pci_error_handlers beiscsi_eeh_handlers = { 5891 .error_detected = beiscsi_eeh_err_detected, 5892 .slot_reset = beiscsi_eeh_reset, 5893 .resume = beiscsi_eeh_resume, 5894 }; 5895 5896 struct iscsi_transport beiscsi_iscsi_transport = { 5897 .owner = THIS_MODULE, 5898 .name = DRV_NAME, 5899 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | 5900 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, 5901 .create_session = beiscsi_session_create, 5902 .destroy_session = beiscsi_session_destroy, 5903 .create_conn = beiscsi_conn_create, 5904 .bind_conn = beiscsi_conn_bind, 5905 .destroy_conn = iscsi_conn_teardown, 5906 .attr_is_visible = beiscsi_attr_is_visible, 5907 .set_iface_param = beiscsi_iface_set_param, 5908 .get_iface_param = beiscsi_iface_get_param, 5909 .set_param = beiscsi_set_param, 5910 .get_conn_param = iscsi_conn_get_param, 5911 .get_session_param = iscsi_session_get_param, 5912 .get_host_param = beiscsi_get_host_param, 5913 .start_conn = beiscsi_conn_start, 5914 .stop_conn = iscsi_conn_stop, 5915 .send_pdu = iscsi_conn_send_pdu, 5916 .xmit_task = beiscsi_task_xmit, 5917 .cleanup_task = beiscsi_cleanup_task, 5918 .alloc_pdu = beiscsi_alloc_pdu, 5919 .parse_pdu_itt = beiscsi_parse_pdu, 5920 .get_stats = beiscsi_conn_get_stats, 5921 .get_ep_param = beiscsi_ep_get_param, 5922 .ep_connect = beiscsi_ep_connect, 5923 .ep_poll = beiscsi_ep_poll, 5924 .ep_disconnect = beiscsi_ep_disconnect, 5925 .session_recovery_timedout = iscsi_session_recovery_timedout, 5926 .bsg_request = beiscsi_bsg_request, 5927 }; 5928 5929 static struct pci_driver beiscsi_pci_driver = { 5930 .name = DRV_NAME, 5931 .probe = beiscsi_dev_probe, 5932 .remove = beiscsi_remove, 5933 .id_table = beiscsi_pci_id_table, 5934 .err_handler = &beiscsi_eeh_handlers 5935 }; 5936 5937 static int __init beiscsi_module_init(void) 5938 { 5939 int ret; 5940 5941 beiscsi_scsi_transport = 5942 iscsi_register_transport(&beiscsi_iscsi_transport); 5943 if (!beiscsi_scsi_transport) { 5944 printk(KERN_ERR 5945 "beiscsi_module_init - Unable to register beiscsi transport.\n"); 5946 return -ENOMEM; 5947 } 5948 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n", 5949 &beiscsi_iscsi_transport); 5950 5951 ret = pci_register_driver(&beiscsi_pci_driver); 5952 if (ret) { 5953 printk(KERN_ERR 5954 "beiscsi_module_init - Unable to register beiscsi pci driver.\n"); 5955 goto unregister_iscsi_transport; 5956 } 5957 return 0; 5958 5959 unregister_iscsi_transport: 5960 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5961 return ret; 5962 } 5963 5964 static void __exit beiscsi_module_exit(void) 5965 { 5966 pci_unregister_driver(&beiscsi_pci_driver); 5967 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5968 } 5969 5970 module_init(beiscsi_module_init); 5971 module_exit(beiscsi_module_exit); 5972