/*
 * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
 * Host Bus Adapters. Refer to the README file included with this package
 * for driver version and adapter compatibility.
 *
 * Copyright (c) 2018 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful. ALL EXPRESS
 * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
 * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
 * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
 * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 *
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
#include <linux/irq_poll.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		 "Maximum Size (In Kilobytes) of physically contiguous "
		 "memory that can be allocated. Range is 16 - 128");

#define beiscsi_disp_param(_name)\
static ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
static int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
			size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
static int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)

/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable.
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events : 0x01\n"
		"\t\t\t\tMailbox Events : 0x02\n"
		"\t\t\t\tMiscellaneous Events : 0x04\n"
		"\t\t\t\tError Handling : 0x08\n"
		"\t\t\t\tIO Path Events : 0x10\n"
		"\t\t\t\tConfiguration Path : 0x20\n"
		"\t\t\t\tiSCSI Protocol : 0x40\n");

DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
	    beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
	    beiscsi_free_session_disp, NULL);
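
/*
 * Rough sketch of what the BEISCSI_RW_ATTR(log_enable, ...) use above
 * expands to (illustrative only; bodies and whitespace elided):
 *
 *	static uint beiscsi_log_enable = 0x00;
 *	module_param(beiscsi_log_enable, uint, S_IRUGO);
 *	MODULE_PARM_DESC(beiscsi_log_enable, "...");
 *	static ssize_t beiscsi_log_enable_disp(...);	// sysfs show
 *	static int beiscsi_log_enable_change(...);	// bounds-checked set
 *	static ssize_t beiscsi_log_enable_store(...);	// sysfs store
 *	static int beiscsi_log_enable_init(...);	// init-time default
 *	DEVICE_ATTR(beiscsi_log_enable, S_IRUGO | S_IWUSR,
 *		    beiscsi_log_enable_disp, beiscsi_log_enable_store);
 */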
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_session_count,
	&dev_attr_beiscsi_free_session_count,
	&dev_attr_beiscsi_phys_port,
	NULL,
};

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_io_task *abrt_io_task;
	struct beiscsi_conn *beiscsi_conn;
	struct iscsi_session *session;
	struct invldt_cmd_tbl inv_tbl;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	int rc;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* check if we raced, task just got cleaned up under us */
	spin_lock_bh(&session->back_lock);
	if (!abrt_task || !abrt_task->sc) {
		spin_unlock_bh(&session->back_lock);
		return SUCCESS;
	}
	/* get a task ref till FW processes the req for the ICD used */
	__iscsi_get_task(abrt_task);
	abrt_io_task = abrt_task->dd_data;
	conn = abrt_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	/* mark the WRB invalid if FW has not processed it yet */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	}
	inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
	spin_unlock_bh(&session->back_lock);

	rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
	iscsi_put_task(abrt_task);
	if (rc) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : sc %p invalidation failed %d\n",
			    sc, rc);
		return FAILED;
	}

	return iscsi_eh_abort(sc);
}
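
/*
 * beiscsi_eh_device_reset - invalidate outstanding commands on the LUN
 * @sc: SCSI command from the EH identifying the target LUN
 *
 * Collects every active command queued to the LUN, marks its WRB
 * invalid, asks FW to invalidate the corresponding ICDs in one call,
 * then hands the actual reset to iscsi_eh_device_reset().
 */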
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct beiscsi_invldt_cmd_tbl {
		struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
		struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
	} *inv_tbl;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_session *session;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	struct iscsi_task *task;
	unsigned int i, nents;
	int rc, more = 0;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
	if (!inv_tbl) {
		spin_unlock_bh(&session->frwd_lock);
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : invldt_cmd_tbl alloc failed\n");
		return FAILED;
	}
	nents = 0;
	/* take back_lock to prevent task from getting cleaned up under us */
	spin_lock(&session->back_lock);
	for (i = 0; i < conn->session->cmds_max; i++) {
		task = conn->session->cmds[i];
		if (!task->sc)
			continue;

		if (sc->device->lun != task->sc->device->lun)
			continue;
		/*
		 * Can't fit in more cmds? Normally this won't happen
		 * because BEISCSI_CMD_PER_LUN is the same as
		 * BE_INVLDT_CMD_TBL_SZ.
		 */
		if (nents == BE_INVLDT_CMD_TBL_SZ) {
			more = 1;
			break;
		}

		/* get a task ref till FW processes the req for the ICD used */
		__iscsi_get_task(task);
		io_task = task->dd_data;
		/* mark the WRB invalid if FW has not processed it yet */
		if (is_chip_be2_be3r(phba)) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
				      io_task->pwrb_handle->pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
				      io_task->pwrb_handle->pwrb, 1);
		}

		inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
		inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
		inv_tbl->task[nents] = task;
		nents++;
	}
	spin_unlock(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);

	rc = SUCCESS;
	if (!nents)
		goto end_reset;

	if (more) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : number of cmds exceeds size of invalidation table\n");
		rc = FAILED;
		goto end_reset;
	}

	if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : cid %u scmds invalidation failed\n",
			    beiscsi_conn->beiscsi_conn_cid);
		rc = FAILED;
	}

end_reset:
	for (i = 0; i < nents; i++)
		iscsi_put_task(inv_tbl->task[i]);
	kfree(inv_tbl);

	if (rc == SUCCESS)
		rc = iscsi_eh_device_reset(sc);
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
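
/*
 * SCSI host template: the I/O submission path (queuecommand) and
 * target handling come straight from libiscsi; only the abort and
 * device-reset EH callbacks are driver specific, wrapping FW ICD
 * invalidation around the generic libiscsi handlers.
 */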
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_timed_out = iscsi_eh_cmd_timed_out,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
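
/*
 * Enable the PCI function and claim its resources: enable the device,
 * request the regions, turn on bus mastering, then try a 64-bit DMA
 * mask and fall back to 32-bit for both streaming and coherent masks.
 */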
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);
	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
	if (ret) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		} else {
			ret = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(32));
		}
	} else {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}

static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);

	return status;
}
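
/*
 * Worked example for the ICD alignment done in beiscsi_get_params()
 * below (illustrative numbers, assuming 4 KiB pages, BE2_SGE == 32 and
 * a 16-byte struct iscsi_sge): icd_post_per_page = 4096 / (32 * 16) = 8
 * and align_mask = 7. With icd_start = 10 and icd_count = 100:
 * icd_start_align = (10 + 8) & ~7 = 16, icd_count_align = 100 & ~7 = 96,
 * so (16 - 10) + (100 - 96) = 10 ICDs are discarded and 90 remain.
 */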
/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr to device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned to ICDs per page */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						   ~(align_mask));
				phba->fw_config.iscsi_icd_start[ulp_num] =
					icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICDs discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] =
				(icd_count - icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Aligned ICD values\n"
				    "\t ICD Start : %d\n"
				    "\t ICD Count : %d\n"
				    "\t ICD Discarded : %d\n",
				    phba->fw_config.iscsi_icd_start[ulp_num],
				    phba->fw_config.iscsi_icd_count[ulp_num],
				    icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				    (total_cid_count +
				     BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}

static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		 DB_EQ_RING_ID_HIGH_MASK)
		<< DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
				       resource_id) / 32] &
		      EQE_RESID_MASK) >> 16) == mcc->id) {
			mcc_events++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}

	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
	}
	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_queue_info *eq;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;

	phba = pbe_eq->phba;
	/* disable interrupt till iopoll completes */
	hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
	irq_poll_sched(&pbe_eq->iopoll);

	return IRQ_HANDLED;
}
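
/*
 * Legacy INTx path below: a single vector covers both the MCC and I/O
 * EQs, so be_isr() must inspect each EQE's resource_id to tell MCC
 * events (handed to the work queue) from I/O events (handed to
 * irq_poll). With MSI-X, the dedicated handlers above need no such
 * demultiplexing.
 */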
/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events, io_events;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr, rearm;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	io_events = 0;
	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		      resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
			mcc_events++;
		else
			io_events++;
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (!io_events && !mcc_events)
		return IRQ_NONE;

	/* no need to rearm if interrupt is only for IOs */
	rearm = 0;
	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		/* rearm for MCCQ */
		rearm = 1;
	}
	if (io_events)
		irq_poll_sched(&pbe_eq->iopoll);
	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
	return IRQ_HANDLED;
}

static void beiscsi_free_irqs(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	int i;

	if (!phba->pcidev->msix_enabled) {
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
		return;
	}

	phwi_context = phba->phwi_ctrlr->phwi_ctxt;
	for (i = 0; i <= phba->num_cpus; i++) {
		free_irq(pci_irq_vector(phba->pcidev, i),
			 &phwi_context->be_eq[i]);
		kfree(phba->msi_name[i]);
	}
}
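
/*
 * MSI-X vector layout used below: vectors 0..num_cpus-1 serve the
 * per-CPU I/O EQs (be_isr_msix) and vector num_cpus serves the MCC EQ
 * (be_isr_mcc). beiscsi_free_irqs() above tears down the same
 * num_cpus + 1 vectors.
 */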
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (pcidev->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kasprintf(GFP_KERNEL,
						      "beiscsi_%02x_%02x",
						      phba->shost->host_no, i);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			ret = request_irq(pci_irq_vector(pcidev, i),
					  be_isr_msix, 0, phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
					      phba->shost->host_no);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
				  phba->msi_name[i], &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		free_irq(pci_irq_vector(pcidev, j), &phwi_context->be_eq[j]);
		kfree(phba->msi_name[j]);
	}
	return ret;
}

void hwi_ring_cq_db(struct beiscsi_hba *phba,
		    unsigned int id, unsigned int num_processed,
		    unsigned char rearm)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		 DB_CQ_RING_ID_HIGH_MASK)
		<< DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index ==
		    (phba->params.ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
	return psgl_handle;
}
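
/*
 * Return an I/O SGL handle to its ring. A non-NULL slot at the free
 * index means the handle was already returned (see the double-free log
 * below); the free is then ignored rather than corrupting the pool.
 */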
static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
			    phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}

static inline struct wrb_handle *
beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       unsigned int wrbs_per_cxn)
{
	struct wrb_handle *pwrb_handle;
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	if (!pwrb_context->wrb_handles_available) {
		spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
		return NULL;
	}
	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
	pwrb_context->wrb_handles_available--;
	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
		pwrb_context->alloc_index = 0;
	else
		pwrb_context->alloc_index++;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);

	if (pwrb_handle)
		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));

	return pwrb_handle;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pcontext: ptr to ptr to wrb context
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* return the context address */
	*pcontext = pwrb_context;
	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
}

static inline void
beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       struct wrb_handle *pwrb_handle,
		       unsigned int wrbs_per_cxn)
{
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;
	pwrb_handle->pio_handle = NULL;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}
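
/*
 * Typical WRB handle life cycle: the PDU allocation path claims a slot
 * on the connection's WRB ring via alloc_wrb_handle(), the handle
 * carries the task in pio_handle until the chip reports completion,
 * and the completion path returns it through free_wrb_handle() below.
 */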
/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	beiscsi_put_wrb_handle(pwrb_context,
			       pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle, "
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, "
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}
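
/*
 * Solicited SCSI completion: translate the normalized CQE fields into
 * the midlayer result, account under/overflow residuals, and copy
 * sense data (a 2-byte big-endian length followed by the sense bytes
 * in the status BHS) before completing the task with libiscsi.
 */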
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task,
	       struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task,
		   struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	uint16_t wrb_index, cid, cri_index;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_session *session;
	struct iscsi_task *task;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	session = beiscsi_conn->conn->session;
	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (task)
		__iscsi_put_task(task);
	spin_unlock_bh(&session->back_lock);
}
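
/* Complete a NOP-In received in response to our NOP-Out back to libiscsi. */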
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task,
		       struct common_sol_cqe *csol_cqe)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = csol_cqe->i_flags;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
				struct sol_cqe *psol,
				struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}
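
/*
 * Dispatch a solicited completion by the WRB type recorded at submit
 * time: I/O and NOP complete through be_complete_io() or
 * be_complete_nopin_resp(), logout and TMF rebuild their response
 * headers; login responses never arrive on this solicited path.
 */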
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_task *task;
	uint16_t cri_index = 0;
	uint8_t type;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[
		      csol_cqe.wrb_index];

	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (!task) {
		spin_unlock_bh(&session->back_lock);
		return;
	}
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " hwi_complete_cmd - Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}

/*
 * ASYNC PDUs include
 * a. Unsolicited NOP-In (target initiated NOP-In)
 * b. ASYNC Messages
 * c. Reject PDU
 * d. Login response
 * These headers arrive unprocessed by the EP firmware.
 * The iSCSI layer processes them.
 */
static unsigned int
beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct pdu_base *phdr, void *pdata, unsigned int dlen)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;
	struct iscsi_task *task;
	u8 code;

	code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
	switch (code) {
	case ISCSI_OP_NOOP_IN:
		pdata = NULL;
		dlen = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pdata);
		WARN_ON(dlen != 48);
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)phdr;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : unrecognized async PDU opcode 0x%x\n",
			    code);
		return 1;
	}
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
	return 0;
}

static inline void
beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	pasync_handle->is_final = 0;
	pasync_handle->buffer_len = 0;
	pasync_handle->in_use = 0;
	list_del_init(&pasync_handle->link);
}

static void
beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
			  struct hd_async_context *pasync_ctx,
			  u16 cri)
{
	struct hd_async_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link)
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
}
static struct hd_async_handle *
beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct i_t_dpdu_cqe *pdpdu_cqe,
		       u8 *header)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle;
	struct be_bus_address phys_addr;
	u16 cid, code, ci, cri;
	u8 final, error = 0;
	u32 dpl;

	cid = beiscsi_conn->beiscsi_conn_cid;
	cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
	/*
	 * This function is invoked to get the right async_handle structure
	 * from a given DEF PDU CQ entry.
	 *
	 * - index in CQ entry gives the vertical index
	 * - address in CQ entry is the offset where the DMA last ended
	 * - final - no more notifications for this PDU
	 */
	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      final, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      final, pdpdu_cqe);
	}

	/*
	 * DB addr Hi/Lo is same for BE and SKH.
	 * Subtract the data placement length to get to the base.
	 */
	phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_lo, pdpdu_cqe);
	phys_addr.u.a32.address_lo -= dpl;
	phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_hi, pdpdu_cqe);

	code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
	switch (code) {
	case UNSOL_HDR_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].header;
		*header = 1;
		break;
	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
		error = 1;
		/* fall through */
	case UNSOL_DATA_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].data;
		break;
	/* called only for above codes */
	default:
		return NULL;
	}

	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
	    pasync_handle->index != ci) {
		/* driver bug - if ci does not match async handle index */
		error = 1;
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
			    cid, pasync_handle->is_header ? 'H' : 'D',
			    pasync_handle->pa.u.a64.address,
			    pasync_handle->index,
			    phys_addr.u.a64.address, ci);
		/* FW has stale address - attempt continuing by dropping */
	}

	/*
	 * DEF PDU header and data buffers with errors should be simply
	 * dropped as there are no consumers for them.
	 */
	if (error) {
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
		return NULL;
	}

	if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
			    cid, code, ci, phys_addr.u.a64.address);
		beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	}

	list_del_init(&pasync_handle->link);
	/*
	 * Each CID is associated with a unique CRI.
	 * The ASYNC_CRI_FROM_CID and CRI_FROM_CID mappings are totally
	 * different.
	 */
	pasync_handle->cri = cri;
	pasync_handle->is_final = final;
	pasync_handle->buffer_len = dpl;
	pasync_handle->in_use = 1;

	return pasync_handle;
}

static unsigned int
beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
		    struct hd_async_context *pasync_ctx,
		    u16 cri)
{
	struct iscsi_session *session = beiscsi_conn->conn->session;
	struct hd_async_handle *pasync_handle, *plast_handle;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	void *phdr = NULL, *pdata = NULL;
	u32 dlen = 0, status = 0;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	plast_handle = NULL;
	list_for_each_entry(pasync_handle, plist, link) {
		plast_handle = pasync_handle;
		/* get the header, the first entry */
		if (!phdr) {
			phdr = pasync_handle->pbuffer;
			continue;
		}
		/* use first buffer to collect all the data */
		if (!pdata) {
			pdata = pasync_handle->pbuffer;
			dlen = pasync_handle->buffer_len;
			continue;
		}
		if (!pasync_handle->buffer_len ||
		    (dlen + pasync_handle->buffer_len) >
		    pasync_ctx->async_data.buffer_size)
			break;
		memcpy(pdata + dlen, pasync_handle->pbuffer,
		       pasync_handle->buffer_len);
		dlen += pasync_handle->buffer_len;
	}

	if (!plast_handle->is_final) {
		/* last handle should have final PDU notification from FW */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n",
			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
			    AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr),
			    pasync_ctx->async_entry[cri].wq.hdr_len,
			    pasync_ctx->async_entry[cri].wq.bytes_needed,
			    pasync_ctx->async_entry[cri].wq.bytes_received);
	}
	spin_lock_bh(&session->back_lock);
	status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
	spin_unlock_bh(&session->back_lock);
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	return status;
}

static unsigned int
beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	unsigned int bytes_needed = 0, status = 0;
	u16 cri = pasync_handle->cri;
	struct cri_wait_queue *wq;
	struct beiscsi_hba *phba;
	struct pdu_base *ppdu;
	char *err = "";

	phba = beiscsi_conn->phba;
	wq = &pasync_ctx->async_entry[cri].wq;
	if (pasync_handle->is_header) {
		/* check if PDU hdr is rcv'd when old hdr not completed */
		if (wq->hdr_len) {
			err = "incomplete";
			goto drop_pdu;
		}
		ppdu = pasync_handle->pbuffer;
		bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
					     data_len_hi, ppdu);
		bytes_needed <<= 16;
		bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
							  data_len_lo, ppdu));
		wq->hdr_len = pasync_handle->buffer_len;
		wq->bytes_received = 0;
		wq->bytes_needed = bytes_needed;
		list_add_tail(&pasync_handle->link, &wq->list);
		if (!bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	} else {
		/* check if data received has header and is needed */
		if (!wq->hdr_len || !wq->bytes_needed) {
			err = "header less";
			goto drop_pdu;
		}
		wq->bytes_received += pasync_handle->buffer_len;
		/* Something got overwritten? Better catch it here. */
		if (wq->bytes_received > wq->bytes_needed) {
			err = "overflow";
			goto drop_pdu;
		}
		list_add_tail(&pasync_handle->link, &wq->list);
		if (wq->bytes_received == wq->bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	}
	return status;

drop_pdu:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
		    "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
		    beiscsi_conn->beiscsi_conn_cid, err,
		    pasync_handle->is_header ? 'H' : 'D',
		    wq->hdr_len, wq->bytes_needed,
		    pasync_handle->buffer_len);
	/* discard this handle */
	beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
	/* free all the other handles in cri_wait_queue */
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	/* try continuing */
	return status;
}
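
/*
 * Replenish nbuf entries of the DEF PDU header or data ring for the
 * given ULP, starting at the ring's producer index, and ring the
 * doorbell. The SGE addresses are written only on the initial post
 * that covers the whole ring.
 */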
static void
beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
			 u8 header, u8 ulp_num, u16 nbuf)
{
	struct hd_async_handle *pasync_handle;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	struct phys_addr *pasync_sge;
	u32 ring_id, doorbell = 0;
	u32 doorbell_offset;
	u16 prod, pi;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	if (header) {
		pasync_sge = pasync_ctx->async_header.ring_base;
		pi = pasync_ctx->async_header.pi;
		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
		doorbell_offset =
			phwi_ctrlr->default_pdu_hdr[ulp_num].doorbell_offset;
	} else {
		pasync_sge = pasync_ctx->async_data.ring_base;
		pi = pasync_ctx->async_data.pi;
		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
		doorbell_offset =
			phwi_ctrlr->default_pdu_data[ulp_num].doorbell_offset;
	}

	for (prod = 0; prod < nbuf; prod++) {
		if (header)
			pasync_handle = pasync_ctx->async_entry[pi].header;
		else
			pasync_handle = pasync_ctx->async_entry[pi].data;
		WARN_ON(pasync_handle->is_header != header);
		WARN_ON(pasync_handle->index != pi);
		/* setup the ring only once */
		if (nbuf == pasync_ctx->num_entries) {
			/* note hi is lo */
			pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
		}
		if (++pi == pasync_ctx->num_entries)
			pi = 0;
	}

	if (header)
		pasync_ctx->async_header.pi = pi;
	else
		pasync_ctx->async_data.pi = pi;

	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
	doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
	doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
	iowrite32(doorbell, phba->db_va + doorbell_offset);
}

static void
beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
			  struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle = NULL;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	u8 ulp_num, consumed, header = 0;
	u16 cid_cri;

	phwi_ctrlr = phba->phwi_ctrlr;
	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
					       pdpdu_cqe, &header);
	if (is_chip_be2_be3r(phba))
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
					 num_cons, pdpdu_cqe);
	else
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
					 num_cons, pdpdu_cqe);
	if (pasync_handle)
		beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
	/* num_cons indicates the number of 8-RQE groups consumed */
	if (consumed)
		beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed);
}

void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return;

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
				       num_processed, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			beiscsi_process_async_event(phba, mcc_compl);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}
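
/* Work item scheduled from the ISRs: drain the MCC CQ, then rearm the EQ. */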
(!beiscsi_hba_in_error(phba))
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}

/**
 * beiscsi_process_cq()- Process the Completion Queue
 * @pbe_eq: Event Q on which the Completion has come
 * @budget: Max number of events to be processed
 *
 * Return: Number of Completion Entries processed.
 **/
unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int total = 0;
	unsigned int num_processed = 0;
	unsigned short code = 0, cid = 0;
	uint16_t cri_index = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return 0;

		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
			CQE_CODE_MASK);

		/* Get the CID */
		if (is_chip_be2_be3r(phba)) {
			cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
		} else {
			if ((code == DRIVERMSG_NOTIFY) ||
			    (code == UNSOL_HDR_NOTIFY) ||
			    (code == UNSOL_DATA_NOTIFY))
				cid = AMAP_GET_BITS(
						struct amap_i_t_dpdu_cqe_v2,
						cid, sol);
			else
				cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    cid, sol);
		}

		cri_index = BE_GET_CRI_FROM_CID(cid);
		ep = phba->ep_array[cri_index];

		if (ep == NULL) {
			/* connection has already been freed,
			 * just move on to the next one
			 */
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT,
				    "BM_%d : proc cqe of disconn ep: cid %d\n",
				    cid);
			goto proc_next_cqe;
		}

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* replenish cq */
		if (num_processed == 32) {
			hwi_ring_cq_db(phba, cq->id, 32, 0);
			num_processed = 0;
		}
		total++;

		switch (code) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case UNSOL_DATA_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Ignoring %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
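		/*
		 * The remaining cases are error notifications. Digest and
		 * command-kill events are only logged; the connection-fatal
		 * events grouped near the end of this switch additionally
		 * fail the connection via iscsi_conn_failure().
		 */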
case CXN_KILLED_HDR_DIGEST_ERR: 1976 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 1977 beiscsi_log(phba, KERN_ERR, 1978 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1979 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1980 cqe_desc[code], code, cid); 1981 break; 1982 case CMD_KILLED_INVALID_STATSN_RCVD: 1983 case CMD_KILLED_INVALID_R2T_RCVD: 1984 case CMD_CXN_KILLED_LUN_INVALID: 1985 case CMD_CXN_KILLED_ICD_INVALID: 1986 case CMD_CXN_KILLED_ITT_INVALID: 1987 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 1988 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 1989 beiscsi_log(phba, KERN_ERR, 1990 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1991 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1992 cqe_desc[code], code, cid); 1993 break; 1994 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1995 beiscsi_log(phba, KERN_ERR, 1996 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1997 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 1998 cqe_desc[code], code, cid); 1999 spin_lock_bh(&phba->async_pdu_lock); 2000 /* driver consumes the entry and drops the contents */ 2001 beiscsi_hdq_process_compl(beiscsi_conn, 2002 (struct i_t_dpdu_cqe *)sol); 2003 spin_unlock_bh(&phba->async_pdu_lock); 2004 break; 2005 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2006 case CXN_KILLED_BURST_LEN_MISMATCH: 2007 case CXN_KILLED_AHS_RCVD: 2008 case CXN_KILLED_UNKNOWN_HDR: 2009 case CXN_KILLED_STALE_ITT_TTT_RCVD: 2010 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 2011 case CXN_KILLED_TIMED_OUT: 2012 case CXN_KILLED_FIN_RCVD: 2013 case CXN_KILLED_RST_SENT: 2014 case CXN_KILLED_RST_RCVD: 2015 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2016 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2017 case CXN_KILLED_OVER_RUN_RESIDUAL: 2018 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2019 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2020 beiscsi_log(phba, KERN_ERR, 2021 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2022 "BM_%d : Event %s[%d] received on CID : %d\n", 2023 cqe_desc[code], code, cid); 2024 if (beiscsi_conn) 2025 iscsi_conn_failure(beiscsi_conn->conn, 2026 ISCSI_ERR_CONN_FAILED); 2027 break; 2028 default: 2029 beiscsi_log(phba, KERN_ERR, 2030 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2031 "BM_%d : Invalid CQE Event Received Code : %d" 2032 "CID 0x%x...\n", 2033 code, cid); 2034 break; 2035 } 2036 2037 proc_next_cqe: 2038 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2039 queue_tail_inc(cq); 2040 sol = queue_tail_node(cq); 2041 num_processed++; 2042 if (total == budget) 2043 break; 2044 } 2045 2046 hwi_ring_cq_db(phba, cq->id, num_processed, 1); 2047 return total; 2048 } 2049 2050 static int be_iopoll(struct irq_poll *iop, int budget) 2051 { 2052 unsigned int ret, io_events; 2053 struct beiscsi_hba *phba; 2054 struct be_eq_obj *pbe_eq; 2055 struct be_eq_entry *eqe = NULL; 2056 struct be_queue_info *eq; 2057 2058 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2059 phba = pbe_eq->phba; 2060 if (beiscsi_hba_in_error(phba)) { 2061 irq_poll_complete(iop); 2062 return 0; 2063 } 2064 2065 io_events = 0; 2066 eq = &pbe_eq->q; 2067 eqe = queue_tail_node(eq); 2068 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & 2069 EQE_VALID_MASK) { 2070 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 2071 queue_tail_inc(eq); 2072 eqe = queue_tail_node(eq); 2073 io_events++; 2074 } 2075 hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1); 2076 2077 ret = beiscsi_process_cq(pbe_eq, budget); 2078 pbe_eq->cq_count += ret; 2079 if (ret < budget) { 2080 irq_poll_complete(iop); 2081 beiscsi_log(phba, KERN_INFO, 2082 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2083 "BM_%d : rearm pbe_eq->q.id =%d ret %d\n", 2084 pbe_eq->q.id, ret); 2085 if 
(!beiscsi_hba_in_error(phba)) 2086 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2087 } 2088 return ret; 2089 } 2090 2091 static void 2092 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2093 unsigned int num_sg, struct beiscsi_io_task *io_task) 2094 { 2095 struct iscsi_sge *psgl; 2096 unsigned int sg_len, index; 2097 unsigned int sge_len = 0; 2098 unsigned long long addr; 2099 struct scatterlist *l_sg; 2100 unsigned int offset; 2101 2102 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2103 io_task->bhs_pa.u.a32.address_lo); 2104 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2105 io_task->bhs_pa.u.a32.address_hi); 2106 2107 l_sg = sg; 2108 for (index = 0; (index < num_sg) && (index < 2); index++, 2109 sg = sg_next(sg)) { 2110 if (index == 0) { 2111 sg_len = sg_dma_len(sg); 2112 addr = (u64) sg_dma_address(sg); 2113 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2114 sge0_addr_lo, pwrb, 2115 lower_32_bits(addr)); 2116 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2117 sge0_addr_hi, pwrb, 2118 upper_32_bits(addr)); 2119 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2120 sge0_len, pwrb, 2121 sg_len); 2122 sge_len = sg_len; 2123 } else { 2124 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2125 pwrb, sge_len); 2126 sg_len = sg_dma_len(sg); 2127 addr = (u64) sg_dma_address(sg); 2128 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2129 sge1_addr_lo, pwrb, 2130 lower_32_bits(addr)); 2131 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2132 sge1_addr_hi, pwrb, 2133 upper_32_bits(addr)); 2134 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2135 sge1_len, pwrb, 2136 sg_len); 2137 } 2138 } 2139 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2140 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2141 2142 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2143 2144 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2145 io_task->bhs_pa.u.a32.address_hi); 2146 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2147 io_task->bhs_pa.u.a32.address_lo); 2148 2149 if (num_sg == 1) { 2150 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2151 1); 2152 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2153 0); 2154 } else if (num_sg == 2) { 2155 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2156 0); 2157 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2158 1); 2159 } else { 2160 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2161 0); 2162 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2163 0); 2164 } 2165 2166 sg = l_sg; 2167 psgl++; 2168 psgl++; 2169 offset = 0; 2170 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2171 sg_len = sg_dma_len(sg); 2172 addr = (u64) sg_dma_address(sg); 2173 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2174 lower_32_bits(addr)); 2175 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2176 upper_32_bits(addr)); 2177 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2178 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2179 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2180 offset += sg_len; 2181 } 2182 psgl--; 2183 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2184 } 2185 2186 static void 2187 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2188 unsigned int num_sg, struct beiscsi_io_task *io_task) 2189 { 2190 struct iscsi_sge *psgl; 2191 unsigned int sg_len, index; 2192 unsigned int sge_len = 0; 2193 unsigned long long addr; 2194 struct scatterlist *l_sg; 2195 unsigned int offset; 2196 2197 
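	/*
	 * Mirrors hwi_write_sgl_v2() above, using the BE2/BE3 WRB layout:
	 * the first two SG elements are written inline into the WRB as
	 * sge0/sge1 (sge1_r2t_offset carries sge0's length), then the full
	 * list is laid out in the SGL fragment table, whose first entry
	 * always points at the iSCSI BHS. Only the final fragment entry
	 * gets last_sge set.
	 */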
AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2198 io_task->bhs_pa.u.a32.address_lo); 2199 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2200 io_task->bhs_pa.u.a32.address_hi); 2201 2202 l_sg = sg; 2203 for (index = 0; (index < num_sg) && (index < 2); index++, 2204 sg = sg_next(sg)) { 2205 if (index == 0) { 2206 sg_len = sg_dma_len(sg); 2207 addr = (u64) sg_dma_address(sg); 2208 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2209 ((u32)(addr & 0xFFFFFFFF))); 2210 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2211 ((u32)(addr >> 32))); 2212 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2213 sg_len); 2214 sge_len = sg_len; 2215 } else { 2216 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2217 pwrb, sge_len); 2218 sg_len = sg_dma_len(sg); 2219 addr = (u64) sg_dma_address(sg); 2220 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2221 ((u32)(addr & 0xFFFFFFFF))); 2222 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2223 ((u32)(addr >> 32))); 2224 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2225 sg_len); 2226 } 2227 } 2228 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2229 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2230 2231 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2232 2233 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2234 io_task->bhs_pa.u.a32.address_hi); 2235 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2236 io_task->bhs_pa.u.a32.address_lo); 2237 2238 if (num_sg == 1) { 2239 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2240 1); 2241 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2242 0); 2243 } else if (num_sg == 2) { 2244 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2245 0); 2246 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2247 1); 2248 } else { 2249 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2250 0); 2251 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2252 0); 2253 } 2254 sg = l_sg; 2255 psgl++; 2256 psgl++; 2257 offset = 0; 2258 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2259 sg_len = sg_dma_len(sg); 2260 addr = (u64) sg_dma_address(sg); 2261 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2262 (addr & 0xFFFFFFFF)); 2263 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2264 (addr >> 32)); 2265 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2266 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2267 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2268 offset += sg_len; 2269 } 2270 psgl--; 2271 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2272 } 2273 2274 /** 2275 * hwi_write_buffer()- Populate the WRB with task info 2276 * @pwrb: ptr to the WRB entry 2277 * @task: iscsi task which is to be executed 2278 **/ 2279 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2280 { 2281 struct iscsi_sge *psgl; 2282 struct beiscsi_io_task *io_task = task->dd_data; 2283 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2284 struct beiscsi_hba *phba = beiscsi_conn->phba; 2285 uint8_t dsp_value = 0; 2286 2287 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2288 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2289 io_task->bhs_pa.u.a32.address_lo); 2290 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2291 io_task->bhs_pa.u.a32.address_hi); 2292 2293 if (task->data) { 2294 2295 /* Check for the data_count */ 2296 dsp_value = (task->data_count) ? 
1 : 0; 2297 2298 if (is_chip_be2_be3r(phba)) 2299 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2300 pwrb, dsp_value); 2301 else 2302 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2303 pwrb, dsp_value); 2304 2305 /* Map addr only if there is data_count */ 2306 if (dsp_value) { 2307 io_task->mtask_addr = pci_map_single(phba->pcidev, 2308 task->data, 2309 task->data_count, 2310 PCI_DMA_TODEVICE); 2311 if (pci_dma_mapping_error(phba->pcidev, 2312 io_task->mtask_addr)) 2313 return -ENOMEM; 2314 io_task->mtask_data_count = task->data_count; 2315 } else 2316 io_task->mtask_addr = 0; 2317 2318 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2319 lower_32_bits(io_task->mtask_addr)); 2320 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2321 upper_32_bits(io_task->mtask_addr)); 2322 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2323 task->data_count); 2324 2325 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2326 } else { 2327 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2328 io_task->mtask_addr = 0; 2329 } 2330 2331 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2332 2333 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2334 2335 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2336 io_task->bhs_pa.u.a32.address_hi); 2337 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2338 io_task->bhs_pa.u.a32.address_lo); 2339 if (task->data) { 2340 psgl++; 2341 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2342 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2343 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2344 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2345 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2346 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2347 2348 psgl++; 2349 if (task->data) { 2350 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2351 lower_32_bits(io_task->mtask_addr)); 2352 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2353 upper_32_bits(io_task->mtask_addr)); 2354 } 2355 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2356 } 2357 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2358 return 0; 2359 } 2360 2361 /** 2362 * beiscsi_find_mem_req()- Find mem needed 2363 * @phba: ptr to HBA struct 2364 **/ 2365 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2366 { 2367 uint8_t mem_descr_index, ulp_num; 2368 unsigned int num_async_pdu_buf_pages; 2369 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2370 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2371 2372 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2373 2374 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2375 BE_ISCSI_PDU_HEADER_SIZE; 2376 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2377 sizeof(struct hwi_context_memory); 2378 2379 2380 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2381 * (phba->params.wrbs_per_cxn) 2382 * phba->params.cxns_per_ctrl; 2383 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2384 (phba->params.wrbs_per_cxn); 2385 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2386 phba->params.cxns_per_ctrl); 2387 2388 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2389 phba->params.icds_per_ctrl; 2390 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2391 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2392 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2393 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2394 2395 num_async_pdu_buf_sgl_pages = 2396 
PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2397 phba, ulp_num) * 2398 sizeof(struct phys_addr)); 2399 2400 num_async_pdu_buf_pages = 2401 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2402 phba, ulp_num) * 2403 phba->params.defpdu_hdr_sz); 2404 2405 num_async_pdu_data_pages = 2406 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2407 phba, ulp_num) * 2408 phba->params.defpdu_data_sz); 2409 2410 num_async_pdu_data_sgl_pages = 2411 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2412 phba, ulp_num) * 2413 sizeof(struct phys_addr)); 2414 2415 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2416 (ulp_num * MEM_DESCR_OFFSET)); 2417 phba->mem_req[mem_descr_index] = 2418 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2419 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2420 2421 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2422 (ulp_num * MEM_DESCR_OFFSET)); 2423 phba->mem_req[mem_descr_index] = 2424 num_async_pdu_buf_pages * 2425 PAGE_SIZE; 2426 2427 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2428 (ulp_num * MEM_DESCR_OFFSET)); 2429 phba->mem_req[mem_descr_index] = 2430 num_async_pdu_data_pages * 2431 PAGE_SIZE; 2432 2433 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2434 (ulp_num * MEM_DESCR_OFFSET)); 2435 phba->mem_req[mem_descr_index] = 2436 num_async_pdu_buf_sgl_pages * 2437 PAGE_SIZE; 2438 2439 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2440 (ulp_num * MEM_DESCR_OFFSET)); 2441 phba->mem_req[mem_descr_index] = 2442 num_async_pdu_data_sgl_pages * 2443 PAGE_SIZE; 2444 2445 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2446 (ulp_num * MEM_DESCR_OFFSET)); 2447 phba->mem_req[mem_descr_index] = 2448 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2449 sizeof(struct hd_async_handle); 2450 2451 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2452 (ulp_num * MEM_DESCR_OFFSET)); 2453 phba->mem_req[mem_descr_index] = 2454 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2455 sizeof(struct hd_async_handle); 2456 2457 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2458 (ulp_num * MEM_DESCR_OFFSET)); 2459 phba->mem_req[mem_descr_index] = 2460 sizeof(struct hd_async_context) + 2461 (BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2462 sizeof(struct hd_async_entry)); 2463 } 2464 } 2465 } 2466 2467 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2468 { 2469 dma_addr_t bus_add; 2470 struct hwi_controller *phwi_ctrlr; 2471 struct be_mem_descriptor *mem_descr; 2472 struct mem_array *mem_arr, *mem_arr_orig; 2473 unsigned int i, j, alloc_size, curr_alloc_size; 2474 2475 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2476 if (!phba->phwi_ctrlr) 2477 return -ENOMEM; 2478 2479 /* Allocate memory for wrb_context */ 2480 phwi_ctrlr = phba->phwi_ctrlr; 2481 phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl, 2482 sizeof(struct hwi_wrb_context), 2483 GFP_KERNEL); 2484 if (!phwi_ctrlr->wrb_context) { 2485 kfree(phba->phwi_ctrlr); 2486 return -ENOMEM; 2487 } 2488 2489 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2490 GFP_KERNEL); 2491 if (!phba->init_mem) { 2492 kfree(phwi_ctrlr->wrb_context); 2493 kfree(phba->phwi_ctrlr); 2494 return -ENOMEM; 2495 } 2496 2497 mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT, 2498 sizeof(*mem_arr_orig), 2499 GFP_KERNEL); 2500 if (!mem_arr_orig) { 2501 kfree(phba->init_mem); 2502 kfree(phwi_ctrlr->wrb_context); 2503 kfree(phba->phwi_ctrlr); 2504 return -ENOMEM; 2505 } 2506 2507 mem_descr = phba->init_mem; 2508 for (i = 0; i < SE_MEM_MAX; i++) { 2509 if (!phba->mem_req[i]) { 2510 mem_descr->mem_array = NULL; 2511 mem_descr++; 2512 continue; 2513 } 2514 2515 j = 0; 2516 mem_arr 
= mem_arr_orig; 2517 alloc_size = phba->mem_req[i]; 2518 memset(mem_arr, 0, sizeof(struct mem_array) * 2519 BEISCSI_MAX_FRAGS_INIT); 2520 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2521 do { 2522 mem_arr->virtual_address = pci_alloc_consistent( 2523 phba->pcidev, 2524 curr_alloc_size, 2525 &bus_add); 2526 if (!mem_arr->virtual_address) { 2527 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2528 goto free_mem; 2529 if (curr_alloc_size - 2530 rounddown_pow_of_two(curr_alloc_size)) 2531 curr_alloc_size = rounddown_pow_of_two 2532 (curr_alloc_size); 2533 else 2534 curr_alloc_size = curr_alloc_size / 2; 2535 } else { 2536 mem_arr->bus_address.u. 2537 a64.address = (__u64) bus_add; 2538 mem_arr->size = curr_alloc_size; 2539 alloc_size -= curr_alloc_size; 2540 curr_alloc_size = min(be_max_phys_size * 2541 1024, alloc_size); 2542 j++; 2543 mem_arr++; 2544 } 2545 } while (alloc_size); 2546 mem_descr->num_elements = j; 2547 mem_descr->size_in_bytes = phba->mem_req[i]; 2548 mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr), 2549 GFP_KERNEL); 2550 if (!mem_descr->mem_array) 2551 goto free_mem; 2552 2553 memcpy(mem_descr->mem_array, mem_arr_orig, 2554 sizeof(struct mem_array) * j); 2555 mem_descr++; 2556 } 2557 kfree(mem_arr_orig); 2558 return 0; 2559 free_mem: 2560 mem_descr->num_elements = j; 2561 while ((i) || (j)) { 2562 for (j = mem_descr->num_elements; j > 0; j--) { 2563 pci_free_consistent(phba->pcidev, 2564 mem_descr->mem_array[j - 1].size, 2565 mem_descr->mem_array[j - 1]. 2566 virtual_address, 2567 (unsigned long)mem_descr-> 2568 mem_array[j - 1]. 2569 bus_address.u.a64.address); 2570 } 2571 if (i) { 2572 i--; 2573 kfree(mem_descr->mem_array); 2574 mem_descr--; 2575 } 2576 } 2577 kfree(mem_arr_orig); 2578 kfree(phba->init_mem); 2579 kfree(phba->phwi_ctrlr->wrb_context); 2580 kfree(phba->phwi_ctrlr); 2581 return -ENOMEM; 2582 } 2583 2584 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2585 { 2586 beiscsi_find_mem_req(phba); 2587 return beiscsi_alloc_mem(phba); 2588 } 2589 2590 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2591 { 2592 struct pdu_data_out *pdata_out; 2593 struct pdu_nop_out *pnop_out; 2594 struct be_mem_descriptor *mem_descr; 2595 2596 mem_descr = phba->init_mem; 2597 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2598 pdata_out = 2599 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2600 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2601 2602 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2603 IIOC_SCSI_DATA); 2604 2605 pnop_out = 2606 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 
2607 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2608 2609 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2610 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2611 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2612 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2613 } 2614 2615 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2616 { 2617 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2618 struct hwi_context_memory *phwi_ctxt; 2619 struct wrb_handle *pwrb_handle = NULL; 2620 struct hwi_controller *phwi_ctrlr; 2621 struct hwi_wrb_context *pwrb_context; 2622 struct iscsi_wrb *pwrb = NULL; 2623 unsigned int num_cxn_wrbh = 0; 2624 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2625 2626 mem_descr_wrbh = phba->init_mem; 2627 mem_descr_wrbh += HWI_MEM_WRBH; 2628 2629 mem_descr_wrb = phba->init_mem; 2630 mem_descr_wrb += HWI_MEM_WRB; 2631 phwi_ctrlr = phba->phwi_ctrlr; 2632 2633 /* Allocate memory for WRBQ */ 2634 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2635 phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl, 2636 sizeof(struct be_queue_info), 2637 GFP_KERNEL); 2638 if (!phwi_ctxt->be_wrbq) { 2639 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2640 "BM_%d : WRBQ Mem Alloc Failed\n"); 2641 return -ENOMEM; 2642 } 2643 2644 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2645 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2646 pwrb_context->pwrb_handle_base = 2647 kcalloc(phba->params.wrbs_per_cxn, 2648 sizeof(struct wrb_handle *), 2649 GFP_KERNEL); 2650 if (!pwrb_context->pwrb_handle_base) { 2651 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2652 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2653 goto init_wrb_hndl_failed; 2654 } 2655 pwrb_context->pwrb_handle_basestd = 2656 kcalloc(phba->params.wrbs_per_cxn, 2657 sizeof(struct wrb_handle *), 2658 GFP_KERNEL); 2659 if (!pwrb_context->pwrb_handle_basestd) { 2660 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2661 "BM_%d : Mem Alloc Failed. 
Failing to load\n"); 2662 goto init_wrb_hndl_failed; 2663 } 2664 if (!num_cxn_wrbh) { 2665 pwrb_handle = 2666 mem_descr_wrbh->mem_array[idx].virtual_address; 2667 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2668 ((sizeof(struct wrb_handle)) * 2669 phba->params.wrbs_per_cxn)); 2670 idx++; 2671 } 2672 pwrb_context->alloc_index = 0; 2673 pwrb_context->wrb_handles_available = 0; 2674 pwrb_context->free_index = 0; 2675 2676 if (num_cxn_wrbh) { 2677 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2678 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2679 pwrb_context->pwrb_handle_basestd[j] = 2680 pwrb_handle; 2681 pwrb_context->wrb_handles_available++; 2682 pwrb_handle->wrb_index = j; 2683 pwrb_handle++; 2684 } 2685 num_cxn_wrbh--; 2686 } 2687 spin_lock_init(&pwrb_context->wrb_lock); 2688 } 2689 idx = 0; 2690 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2691 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2692 if (!num_cxn_wrb) { 2693 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2694 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2695 ((sizeof(struct iscsi_wrb) * 2696 phba->params.wrbs_per_cxn)); 2697 idx++; 2698 } 2699 2700 if (num_cxn_wrb) { 2701 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2702 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2703 pwrb_handle->pwrb = pwrb; 2704 pwrb++; 2705 } 2706 num_cxn_wrb--; 2707 } 2708 } 2709 return 0; 2710 init_wrb_hndl_failed: 2711 for (j = index; j > 0; j--) { 2712 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2713 kfree(pwrb_context->pwrb_handle_base); 2714 kfree(pwrb_context->pwrb_handle_basestd); 2715 } 2716 return -ENOMEM; 2717 } 2718 2719 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2720 { 2721 uint8_t ulp_num; 2722 struct hwi_controller *phwi_ctrlr; 2723 struct hba_parameters *p = &phba->params; 2724 struct hd_async_context *pasync_ctx; 2725 struct hd_async_handle *pasync_header_h, *pasync_data_h; 2726 unsigned int index, idx, num_per_mem, num_async_data; 2727 struct be_mem_descriptor *mem_descr; 2728 2729 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2730 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2731 /* get async_ctx for each ULP */ 2732 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2733 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2734 (ulp_num * MEM_DESCR_OFFSET)); 2735 2736 phwi_ctrlr = phba->phwi_ctrlr; 2737 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2738 (struct hd_async_context *) 2739 mem_descr->mem_array[0].virtual_address; 2740 2741 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2742 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2743 2744 pasync_ctx->async_entry = 2745 (struct hd_async_entry *) 2746 ((long unsigned int)pasync_ctx + 2747 sizeof(struct hd_async_context)); 2748 2749 pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba, 2750 ulp_num); 2751 /* setup header buffers */ 2752 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2753 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2754 (ulp_num * MEM_DESCR_OFFSET); 2755 if (mem_descr->mem_array[0].virtual_address) { 2756 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2757 "BM_%d : hwi_init_async_pdu_ctx" 2758 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2759 ulp_num, 2760 mem_descr->mem_array[0]. 
2761 virtual_address); 2762 } else 2763 beiscsi_log(phba, KERN_WARNING, 2764 BEISCSI_LOG_INIT, 2765 "BM_%d : No Virtual address for ULP : %d\n", 2766 ulp_num); 2767 2768 pasync_ctx->async_header.pi = 0; 2769 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz; 2770 pasync_ctx->async_header.va_base = 2771 mem_descr->mem_array[0].virtual_address; 2772 2773 pasync_ctx->async_header.pa_base.u.a64.address = 2774 mem_descr->mem_array[0]. 2775 bus_address.u.a64.address; 2776 2777 /* setup header buffer sgls */ 2778 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2779 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2780 (ulp_num * MEM_DESCR_OFFSET); 2781 if (mem_descr->mem_array[0].virtual_address) { 2782 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2783 "BM_%d : hwi_init_async_pdu_ctx" 2784 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 2785 ulp_num, 2786 mem_descr->mem_array[0]. 2787 virtual_address); 2788 } else 2789 beiscsi_log(phba, KERN_WARNING, 2790 BEISCSI_LOG_INIT, 2791 "BM_%d : No Virtual address for ULP : %d\n", 2792 ulp_num); 2793 2794 pasync_ctx->async_header.ring_base = 2795 mem_descr->mem_array[0].virtual_address; 2796 2797 /* setup header buffer handles */ 2798 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2799 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2800 (ulp_num * MEM_DESCR_OFFSET); 2801 if (mem_descr->mem_array[0].virtual_address) { 2802 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2803 "BM_%d : hwi_init_async_pdu_ctx" 2804 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 2805 ulp_num, 2806 mem_descr->mem_array[0]. 2807 virtual_address); 2808 } else 2809 beiscsi_log(phba, KERN_WARNING, 2810 BEISCSI_LOG_INIT, 2811 "BM_%d : No Virtual address for ULP : %d\n", 2812 ulp_num); 2813 2814 pasync_ctx->async_header.handle_base = 2815 mem_descr->mem_array[0].virtual_address; 2816 2817 /* setup data buffer sgls */ 2818 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2819 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 2820 (ulp_num * MEM_DESCR_OFFSET); 2821 if (mem_descr->mem_array[0].virtual_address) { 2822 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2823 "BM_%d : hwi_init_async_pdu_ctx" 2824 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 2825 ulp_num, 2826 mem_descr->mem_array[0]. 
2827 virtual_address); 2828 } else 2829 beiscsi_log(phba, KERN_WARNING, 2830 BEISCSI_LOG_INIT, 2831 "BM_%d : No Virtual address for ULP : %d\n", 2832 ulp_num); 2833 2834 pasync_ctx->async_data.ring_base = 2835 mem_descr->mem_array[0].virtual_address; 2836 2837 /* setup data buffer handles */ 2838 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2839 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2840 (ulp_num * MEM_DESCR_OFFSET); 2841 if (!mem_descr->mem_array[0].virtual_address) 2842 beiscsi_log(phba, KERN_WARNING, 2843 BEISCSI_LOG_INIT, 2844 "BM_%d : No Virtual address for ULP : %d\n", 2845 ulp_num); 2846 2847 pasync_ctx->async_data.handle_base = 2848 mem_descr->mem_array[0].virtual_address; 2849 2850 pasync_header_h = 2851 (struct hd_async_handle *) 2852 pasync_ctx->async_header.handle_base; 2853 pasync_data_h = 2854 (struct hd_async_handle *) 2855 pasync_ctx->async_data.handle_base; 2856 2857 /* setup data buffers */ 2858 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2859 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2860 (ulp_num * MEM_DESCR_OFFSET); 2861 if (mem_descr->mem_array[0].virtual_address) { 2862 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2863 "BM_%d : hwi_init_async_pdu_ctx" 2864 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 2865 ulp_num, 2866 mem_descr->mem_array[0]. 2867 virtual_address); 2868 } else 2869 beiscsi_log(phba, KERN_WARNING, 2870 BEISCSI_LOG_INIT, 2871 "BM_%d : No Virtual address for ULP : %d\n", 2872 ulp_num); 2873 2874 idx = 0; 2875 pasync_ctx->async_data.pi = 0; 2876 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz; 2877 pasync_ctx->async_data.va_base = 2878 mem_descr->mem_array[idx].virtual_address; 2879 pasync_ctx->async_data.pa_base.u.a64.address = 2880 mem_descr->mem_array[idx]. 2881 bus_address.u.a64.address; 2882 2883 num_async_data = ((mem_descr->mem_array[idx].size) / 2884 phba->params.defpdu_data_sz); 2885 num_per_mem = 0; 2886 2887 for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE 2888 (phba, ulp_num); index++) { 2889 pasync_header_h->cri = -1; 2890 pasync_header_h->is_header = 1; 2891 pasync_header_h->index = index; 2892 INIT_LIST_HEAD(&pasync_header_h->link); 2893 pasync_header_h->pbuffer = 2894 (void *)((unsigned long) 2895 (pasync_ctx-> 2896 async_header.va_base) + 2897 (p->defpdu_hdr_sz * index)); 2898 2899 pasync_header_h->pa.u.a64.address = 2900 pasync_ctx->async_header.pa_base.u.a64. 2901 address + (p->defpdu_hdr_sz * index); 2902 2903 pasync_ctx->async_entry[index].header = 2904 pasync_header_h; 2905 pasync_header_h++; 2906 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2907 wq.list); 2908 2909 pasync_data_h->cri = -1; 2910 pasync_data_h->is_header = 0; 2911 pasync_data_h->index = index; 2912 INIT_LIST_HEAD(&pasync_data_h->link); 2913 2914 if (!num_async_data) { 2915 num_per_mem = 0; 2916 idx++; 2917 pasync_ctx->async_data.va_base = 2918 mem_descr->mem_array[idx]. 2919 virtual_address; 2920 pasync_ctx->async_data.pa_base.u. 2921 a64.address = 2922 mem_descr->mem_array[idx]. 2923 bus_address.u.a64.address; 2924 num_async_data = 2925 ((mem_descr->mem_array[idx]. 2926 size) / 2927 phba->params.defpdu_data_sz); 2928 } 2929 pasync_data_h->pbuffer = 2930 (void *)((unsigned long) 2931 (pasync_ctx->async_data.va_base) + 2932 (p->defpdu_data_sz * num_per_mem)); 2933 2934 pasync_data_h->pa.u.a64.address = 2935 pasync_ctx->async_data.pa_base.u.a64. 
2936 address + (p->defpdu_data_sz * 2937 num_per_mem); 2938 num_per_mem++; 2939 num_async_data--; 2940 2941 pasync_ctx->async_entry[index].data = 2942 pasync_data_h; 2943 pasync_data_h++; 2944 } 2945 } 2946 } 2947 2948 return 0; 2949 } 2950 2951 static int 2952 be_sgl_create_contiguous(void *virtual_address, 2953 u64 physical_address, u32 length, 2954 struct be_dma_mem *sgl) 2955 { 2956 WARN_ON(!virtual_address); 2957 WARN_ON(!physical_address); 2958 WARN_ON(!length); 2959 WARN_ON(!sgl); 2960 2961 sgl->va = virtual_address; 2962 sgl->dma = (unsigned long)physical_address; 2963 sgl->size = length; 2964 2965 return 0; 2966 } 2967 2968 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 2969 { 2970 memset(sgl, 0, sizeof(*sgl)); 2971 } 2972 2973 static void 2974 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 2975 struct mem_array *pmem, struct be_dma_mem *sgl) 2976 { 2977 if (sgl->va) 2978 be_sgl_destroy_contiguous(sgl); 2979 2980 be_sgl_create_contiguous(pmem->virtual_address, 2981 pmem->bus_address.u.a64.address, 2982 pmem->size, sgl); 2983 } 2984 2985 static void 2986 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 2987 struct mem_array *pmem, struct be_dma_mem *sgl) 2988 { 2989 if (sgl->va) 2990 be_sgl_destroy_contiguous(sgl); 2991 2992 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 2993 pmem->bus_address.u.a64.address, 2994 pmem->size, sgl); 2995 } 2996 2997 static int be_fill_queue(struct be_queue_info *q, 2998 u16 len, u16 entry_size, void *vaddress) 2999 { 3000 struct be_dma_mem *mem = &q->dma_mem; 3001 3002 memset(q, 0, sizeof(*q)); 3003 q->len = len; 3004 q->entry_size = entry_size; 3005 mem->size = len * entry_size; 3006 mem->va = vaddress; 3007 if (!mem->va) 3008 return -ENOMEM; 3009 memset(mem->va, 0, mem->size); 3010 return 0; 3011 } 3012 3013 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3014 struct hwi_context_memory *phwi_context) 3015 { 3016 int ret = -ENOMEM, eq_for_mcc; 3017 unsigned int i, num_eq_pages; 3018 struct be_queue_info *eq; 3019 struct be_dma_mem *mem; 3020 void *eq_vaddress; 3021 dma_addr_t paddr; 3022 3023 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ 3024 sizeof(struct be_eq_entry)); 3025 3026 if (phba->pcidev->msix_enabled) 3027 eq_for_mcc = 1; 3028 else 3029 eq_for_mcc = 0; 3030 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3031 eq = &phwi_context->be_eq[i].q; 3032 mem = &eq->dma_mem; 3033 phwi_context->be_eq[i].phba = phba; 3034 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3035 num_eq_pages * PAGE_SIZE, 3036 &paddr); 3037 if (!eq_vaddress) { 3038 ret = -ENOMEM; 3039 goto create_eq_error; 3040 } 3041 3042 mem->va = eq_vaddress; 3043 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3044 sizeof(struct be_eq_entry), eq_vaddress); 3045 if (ret) { 3046 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3047 "BM_%d : be_fill_queue Failed for EQ\n"); 3048 goto create_eq_error; 3049 } 3050 3051 mem->dma = paddr; 3052 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3053 BEISCSI_EQ_DELAY_DEF); 3054 if (ret) { 3055 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3056 "BM_%d : beiscsi_cmd_eq_create" 3057 "Failed for EQ\n"); 3058 goto create_eq_error; 3059 } 3060 3061 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3062 "BM_%d : eqid = %d\n", 3063 phwi_context->be_eq[i].q.id); 3064 } 3065 return 0; 3066 3067 create_eq_error: 3068 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3069 eq = &phwi_context->be_eq[i].q; 3070 mem = &eq->dma_mem; 3071 if (mem->va) 3072 pci_free_consistent(phba->pcidev, 
num_eq_pages 3073 * PAGE_SIZE, 3074 mem->va, mem->dma); 3075 } 3076 return ret; 3077 } 3078 3079 static int beiscsi_create_cqs(struct beiscsi_hba *phba, 3080 struct hwi_context_memory *phwi_context) 3081 { 3082 unsigned int i, num_cq_pages; 3083 struct be_queue_info *cq, *eq; 3084 struct be_dma_mem *mem; 3085 struct be_eq_obj *pbe_eq; 3086 void *cq_vaddress; 3087 int ret = -ENOMEM; 3088 dma_addr_t paddr; 3089 3090 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 3091 sizeof(struct sol_cqe)); 3092 3093 for (i = 0; i < phba->num_cpus; i++) { 3094 cq = &phwi_context->be_cq[i]; 3095 eq = &phwi_context->be_eq[i].q; 3096 pbe_eq = &phwi_context->be_eq[i]; 3097 pbe_eq->cq = cq; 3098 pbe_eq->phba = phba; 3099 mem = &cq->dma_mem; 3100 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3101 num_cq_pages * PAGE_SIZE, 3102 &paddr); 3103 if (!cq_vaddress) { 3104 ret = -ENOMEM; 3105 goto create_cq_error; 3106 } 3107 3108 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3109 sizeof(struct sol_cqe), cq_vaddress); 3110 if (ret) { 3111 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3112 "BM_%d : be_fill_queue Failed " 3113 "for ISCSI CQ\n"); 3114 goto create_cq_error; 3115 } 3116 3117 mem->dma = paddr; 3118 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 3119 false, 0); 3120 if (ret) { 3121 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3122 "BM_%d : beiscsi_cmd_eq_create" 3123 "Failed for ISCSI CQ\n"); 3124 goto create_cq_error; 3125 } 3126 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3127 "BM_%d : iscsi cq_id is %d for eq_id %d\n" 3128 "iSCSI CQ CREATED\n", cq->id, eq->id); 3129 } 3130 return 0; 3131 3132 create_cq_error: 3133 for (i = 0; i < phba->num_cpus; i++) { 3134 cq = &phwi_context->be_cq[i]; 3135 mem = &cq->dma_mem; 3136 if (mem->va) 3137 pci_free_consistent(phba->pcidev, num_cq_pages 3138 * PAGE_SIZE, 3139 mem->va, mem->dma); 3140 } 3141 return ret; 3142 } 3143 3144 static int 3145 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3146 struct hwi_context_memory *phwi_context, 3147 struct hwi_controller *phwi_ctrlr, 3148 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3149 { 3150 unsigned int idx; 3151 int ret; 3152 struct be_queue_info *dq, *cq; 3153 struct be_dma_mem *mem; 3154 struct be_mem_descriptor *mem_descr; 3155 void *dq_vaddress; 3156 3157 idx = 0; 3158 dq = &phwi_context->be_def_hdrq[ulp_num]; 3159 cq = &phwi_context->be_cq[0]; 3160 mem = &dq->dma_mem; 3161 mem_descr = phba->init_mem; 3162 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 3163 (ulp_num * MEM_DESCR_OFFSET); 3164 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3165 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3166 sizeof(struct phys_addr), 3167 sizeof(struct phys_addr), dq_vaddress); 3168 if (ret) { 3169 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3170 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", 3171 ulp_num); 3172 3173 return ret; 3174 } 3175 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 
3176 bus_address.u.a64.address; 3177 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3178 def_pdu_ring_sz, 3179 phba->params.defpdu_hdr_sz, 3180 BEISCSI_DEFQ_HDR, ulp_num); 3181 if (ret) { 3182 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3183 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3184 ulp_num); 3185 3186 return ret; 3187 } 3188 3189 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3190 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3191 ulp_num, 3192 phwi_context->be_def_hdrq[ulp_num].id); 3193 return 0; 3194 } 3195 3196 static int 3197 beiscsi_create_def_data(struct beiscsi_hba *phba, 3198 struct hwi_context_memory *phwi_context, 3199 struct hwi_controller *phwi_ctrlr, 3200 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3201 { 3202 unsigned int idx; 3203 int ret; 3204 struct be_queue_info *dataq, *cq; 3205 struct be_dma_mem *mem; 3206 struct be_mem_descriptor *mem_descr; 3207 void *dq_vaddress; 3208 3209 idx = 0; 3210 dataq = &phwi_context->be_def_dataq[ulp_num]; 3211 cq = &phwi_context->be_cq[0]; 3212 mem = &dataq->dma_mem; 3213 mem_descr = phba->init_mem; 3214 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3215 (ulp_num * MEM_DESCR_OFFSET); 3216 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3217 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3218 sizeof(struct phys_addr), 3219 sizeof(struct phys_addr), dq_vaddress); 3220 if (ret) { 3221 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3222 "BM_%d : be_fill_queue Failed for DEF PDU " 3223 "DATA on ULP : %d\n", 3224 ulp_num); 3225 3226 return ret; 3227 } 3228 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3229 bus_address.u.a64.address; 3230 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3231 def_pdu_ring_sz, 3232 phba->params.defpdu_data_sz, 3233 BEISCSI_DEFQ_DATA, ulp_num); 3234 if (ret) { 3235 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3236 "BM_%d be_cmd_create_default_pdu_queue" 3237 " Failed for DEF PDU DATA on ULP : %d\n", 3238 ulp_num); 3239 return ret; 3240 } 3241 3242 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3243 "BM_%d : iscsi def data id on ULP : %d is %d\n", 3244 ulp_num, 3245 phwi_context->be_def_dataq[ulp_num].id); 3246 3247 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3248 "BM_%d : DEFAULT PDU DATA RING CREATED" 3249 "on ULP : %d\n", ulp_num); 3250 return 0; 3251 } 3252 3253 3254 static int 3255 beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3256 { 3257 struct be_mem_descriptor *mem_descr; 3258 struct mem_array *pm_arr; 3259 struct be_dma_mem sgl; 3260 int status, ulp_num; 3261 3262 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3263 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3264 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3265 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3266 (ulp_num * MEM_DESCR_OFFSET); 3267 pm_arr = mem_descr->mem_array; 3268 3269 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3270 status = be_cmd_iscsi_post_template_hdr( 3271 &phba->ctrl, &sgl); 3272 3273 if (status != 0) { 3274 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3275 "BM_%d : Post Template HDR Failed for" 3276 "ULP_%d\n", ulp_num); 3277 return status; 3278 } 3279 3280 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3281 "BM_%d : Template HDR Pages Posted for" 3282 "ULP_%d\n", ulp_num); 3283 } 3284 } 3285 return 0; 3286 } 3287 3288 static int 3289 beiscsi_post_pages(struct beiscsi_hba *phba) 3290 { 3291 struct be_mem_descriptor *mem_descr; 3292 struct mem_array *pm_arr; 3293 unsigned int page_offset, i; 3294 
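	/*
	 * page_offset below converts the first supported ULP's
	 * iscsi_icd_start into a page offset within the SGE table:
	 * icd_start * num_sge_per_io * sizeof(struct iscsi_sge) / PAGE_SIZE.
	 */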
struct be_dma_mem sgl; 3295 int status, ulp_num = 0; 3296 3297 mem_descr = phba->init_mem; 3298 mem_descr += HWI_MEM_SGE; 3299 pm_arr = mem_descr->mem_array; 3300 3301 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3302 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3303 break; 3304 3305 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3306 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3307 for (i = 0; i < mem_descr->num_elements; i++) { 3308 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3309 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3310 page_offset, 3311 (pm_arr->size / PAGE_SIZE)); 3312 page_offset += pm_arr->size / PAGE_SIZE; 3313 if (status != 0) { 3314 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3315 "BM_%d : post sgl failed.\n"); 3316 return status; 3317 } 3318 pm_arr++; 3319 } 3320 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3321 "BM_%d : POSTED PAGES\n"); 3322 return 0; 3323 } 3324 3325 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3326 { 3327 struct be_dma_mem *mem = &q->dma_mem; 3328 if (mem->va) { 3329 pci_free_consistent(phba->pcidev, mem->size, 3330 mem->va, mem->dma); 3331 mem->va = NULL; 3332 } 3333 } 3334 3335 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3336 u16 len, u16 entry_size) 3337 { 3338 struct be_dma_mem *mem = &q->dma_mem; 3339 3340 memset(q, 0, sizeof(*q)); 3341 q->len = len; 3342 q->entry_size = entry_size; 3343 mem->size = len * entry_size; 3344 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); 3345 if (!mem->va) 3346 return -ENOMEM; 3347 return 0; 3348 } 3349 3350 static int 3351 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3352 struct hwi_context_memory *phwi_context, 3353 struct hwi_controller *phwi_ctrlr) 3354 { 3355 unsigned int num_wrb_rings; 3356 u64 pa_addr_lo; 3357 unsigned int idx, num, i, ulp_num; 3358 struct mem_array *pwrb_arr; 3359 void *wrb_vaddr; 3360 struct be_dma_mem sgl; 3361 struct be_mem_descriptor *mem_descr; 3362 struct hwi_wrb_context *pwrb_context; 3363 int status; 3364 uint8_t ulp_count = 0, ulp_base_num = 0; 3365 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3366 3367 idx = 0; 3368 mem_descr = phba->init_mem; 3369 mem_descr += HWI_MEM_WRB; 3370 pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl, 3371 sizeof(*pwrb_arr), 3372 GFP_KERNEL); 3373 if (!pwrb_arr) { 3374 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3375 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3376 return -ENOMEM; 3377 } 3378 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3379 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3380 num_wrb_rings = mem_descr->mem_array[idx].size / 3381 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3382 3383 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3384 if (num_wrb_rings) { 3385 pwrb_arr[num].virtual_address = wrb_vaddr; 3386 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3387 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3388 sizeof(struct iscsi_wrb); 3389 wrb_vaddr += pwrb_arr[num].size; 3390 pa_addr_lo += pwrb_arr[num].size; 3391 num_wrb_rings--; 3392 } else { 3393 idx++; 3394 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3395 pa_addr_lo = mem_descr->mem_array[idx].\ 3396 bus_address.u.a64.address; 3397 num_wrb_rings = mem_descr->mem_array[idx].size / 3398 (phba->params.wrbs_per_cxn * 3399 sizeof(struct iscsi_wrb)); 3400 pwrb_arr[num].virtual_address = wrb_vaddr; 3401 
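			/* pa_addr_lo was re-based to the new chunk above */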
pwrb_arr[num].bus_address.u.a64.address\ 3402 = pa_addr_lo; 3403 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3404 sizeof(struct iscsi_wrb); 3405 wrb_vaddr += pwrb_arr[num].size; 3406 pa_addr_lo += pwrb_arr[num].size; 3407 num_wrb_rings--; 3408 } 3409 } 3410 3411 /* Get the ULP Count */ 3412 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3413 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3414 ulp_count++; 3415 ulp_base_num = ulp_num; 3416 cid_count_ulp[ulp_num] = 3417 BEISCSI_GET_CID_COUNT(phba, ulp_num); 3418 } 3419 3420 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3421 if (ulp_count > 1) { 3422 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3423 3424 if (!cid_count_ulp[ulp_base_num]) 3425 ulp_base_num = (ulp_base_num + 1) % 3426 BEISCSI_ULP_COUNT; 3427 3428 cid_count_ulp[ulp_base_num]--; 3429 } 3430 3431 3432 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3433 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3434 &phwi_context->be_wrbq[i], 3435 &phwi_ctrlr->wrb_context[i], 3436 ulp_base_num); 3437 if (status != 0) { 3438 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3439 "BM_%d : wrbq create failed."); 3440 kfree(pwrb_arr); 3441 return status; 3442 } 3443 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3444 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3445 } 3446 kfree(pwrb_arr); 3447 return 0; 3448 } 3449 3450 static void free_wrb_handles(struct beiscsi_hba *phba) 3451 { 3452 unsigned int index; 3453 struct hwi_controller *phwi_ctrlr; 3454 struct hwi_wrb_context *pwrb_context; 3455 3456 phwi_ctrlr = phba->phwi_ctrlr; 3457 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 3458 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3459 kfree(pwrb_context->pwrb_handle_base); 3460 kfree(pwrb_context->pwrb_handle_basestd); 3461 } 3462 } 3463 3464 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3465 { 3466 struct be_ctrl_info *ctrl = &phba->ctrl; 3467 struct be_dma_mem *ptag_mem; 3468 struct be_queue_info *q; 3469 int i, tag; 3470 3471 q = &phba->ctrl.mcc_obj.q; 3472 for (i = 0; i < MAX_MCC_CMD; i++) { 3473 tag = i + 1; 3474 if (!test_bit(MCC_TAG_STATE_RUNNING, 3475 &ctrl->ptag_state[tag].tag_state)) 3476 continue; 3477 3478 if (test_bit(MCC_TAG_STATE_TIMEOUT, 3479 &ctrl->ptag_state[tag].tag_state)) { 3480 ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; 3481 if (ptag_mem->size) { 3482 pci_free_consistent(ctrl->pdev, 3483 ptag_mem->size, 3484 ptag_mem->va, 3485 ptag_mem->dma); 3486 ptag_mem->size = 0; 3487 } 3488 continue; 3489 } 3490 /** 3491 * If MCC is still active and waiting then wake up the process. 3492 * We are here only because port is going offline. The process 3493 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is 3494 * returned for the operation and allocated memory cleaned up. 3495 */ 3496 if (waitqueue_active(&ctrl->mcc_wait[tag])) { 3497 ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED; 3498 ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK; 3499 wake_up_interruptible(&ctrl->mcc_wait[tag]); 3500 /* 3501 * Control tag info gets reinitialized in enable 3502 * so wait for the process to clear running state. 3503 */ 3504 while (test_bit(MCC_TAG_STATE_RUNNING, 3505 &ctrl->ptag_state[tag].tag_state)) 3506 schedule_timeout_uninterruptible(HZ); 3507 } 3508 /** 3509 * For MCC with tag_states MCC_TAG_STATE_ASYNC and 3510 * MCC_TAG_STATE_IGNORE nothing needs to done. 
3511 */ 3512 } 3513 if (q->created) { 3514 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3515 be_queue_free(phba, q); 3516 } 3517 3518 q = &phba->ctrl.mcc_obj.cq; 3519 if (q->created) { 3520 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3521 be_queue_free(phba, q); 3522 } 3523 } 3524 3525 static int be_mcc_queues_create(struct beiscsi_hba *phba, 3526 struct hwi_context_memory *phwi_context) 3527 { 3528 struct be_queue_info *q, *cq; 3529 struct be_ctrl_info *ctrl = &phba->ctrl; 3530 3531 /* Alloc MCC compl queue */ 3532 cq = &phba->ctrl.mcc_obj.cq; 3533 if (be_queue_alloc(phba, cq, MCC_CQ_LEN, 3534 sizeof(struct be_mcc_compl))) 3535 goto err; 3536 /* Ask BE to create MCC compl queue; */ 3537 if (phba->pcidev->msix_enabled) { 3538 if (beiscsi_cmd_cq_create(ctrl, cq, 3539 &phwi_context->be_eq[phba->num_cpus].q, 3540 false, true, 0)) 3541 goto mcc_cq_free; 3542 } else { 3543 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3544 false, true, 0)) 3545 goto mcc_cq_free; 3546 } 3547 3548 /* Alloc MCC queue */ 3549 q = &phba->ctrl.mcc_obj.q; 3550 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) 3551 goto mcc_cq_destroy; 3552 3553 /* Ask BE to create MCC queue */ 3554 if (beiscsi_cmd_mccq_create(phba, q, cq)) 3555 goto mcc_q_free; 3556 3557 return 0; 3558 3559 mcc_q_free: 3560 be_queue_free(phba, q); 3561 mcc_cq_destroy: 3562 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); 3563 mcc_cq_free: 3564 be_queue_free(phba, cq); 3565 err: 3566 return -ENOMEM; 3567 } 3568 3569 static void be2iscsi_enable_msix(struct beiscsi_hba *phba) 3570 { 3571 int nvec = 1; 3572 3573 switch (phba->generation) { 3574 case BE_GEN2: 3575 case BE_GEN3: 3576 nvec = BEISCSI_MAX_NUM_CPUS + 1; 3577 break; 3578 case BE_GEN4: 3579 nvec = phba->fw_config.eqid_count; 3580 break; 3581 default: 3582 nvec = 2; 3583 break; 3584 } 3585 3586 /* if eqid_count == 1 fall back to INTX */ 3587 if (enable_msix && nvec > 1) { 3588 const struct irq_affinity desc = { .post_vectors = 1 }; 3589 3590 if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, 3591 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { 3592 phba->num_cpus = nvec - 1; 3593 return; 3594 } 3595 } 3596 3597 phba->num_cpus = 1; 3598 } 3599 3600 static void hwi_purge_eq(struct beiscsi_hba *phba) 3601 { 3602 struct hwi_controller *phwi_ctrlr; 3603 struct hwi_context_memory *phwi_context; 3604 struct be_queue_info *eq; 3605 struct be_eq_entry *eqe = NULL; 3606 int i, eq_msix; 3607 unsigned int num_processed; 3608 3609 if (beiscsi_hba_in_error(phba)) 3610 return; 3611 3612 phwi_ctrlr = phba->phwi_ctrlr; 3613 phwi_context = phwi_ctrlr->phwi_ctxt; 3614 if (phba->pcidev->msix_enabled) 3615 eq_msix = 1; 3616 else 3617 eq_msix = 0; 3618 3619 for (i = 0; i < (phba->num_cpus + eq_msix); i++) { 3620 eq = &phwi_context->be_eq[i].q; 3621 eqe = queue_tail_node(eq); 3622 num_processed = 0; 3623 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 3624 & EQE_VALID_MASK) { 3625 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 3626 queue_tail_inc(eq); 3627 eqe = queue_tail_node(eq); 3628 num_processed++; 3629 } 3630 3631 if (num_processed) 3632 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); 3633 } 3634 } 3635 3636 static void hwi_cleanup_port(struct beiscsi_hba *phba) 3637 { 3638 struct be_queue_info *q; 3639 struct be_ctrl_info *ctrl = &phba->ctrl; 3640 struct hwi_controller *phwi_ctrlr; 3641 struct hwi_context_memory *phwi_context; 3642 int i, eq_for_mcc, ulp_num; 3643 3644 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3645 if (test_bit(ulp_num, 
&phba->fw_config.ulp_supported)) 3646 beiscsi_cmd_iscsi_cleanup(phba, ulp_num); 3647 3648 /** 3649 * Purge all EQ entries that may have been left out. This is to 3650 * workaround a problem we've seen occasionally where driver gets an 3651 * interrupt with EQ entry bit set after stopping the controller. 3652 */ 3653 hwi_purge_eq(phba); 3654 3655 phwi_ctrlr = phba->phwi_ctrlr; 3656 phwi_context = phwi_ctrlr->phwi_ctxt; 3657 3658 be_cmd_iscsi_remove_template_hdr(ctrl); 3659 3660 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3661 q = &phwi_context->be_wrbq[i]; 3662 if (q->created) 3663 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3664 } 3665 kfree(phwi_context->be_wrbq); 3666 free_wrb_handles(phba); 3667 3668 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3669 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3670 3671 q = &phwi_context->be_def_hdrq[ulp_num]; 3672 if (q->created) 3673 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3674 3675 q = &phwi_context->be_def_dataq[ulp_num]; 3676 if (q->created) 3677 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3678 } 3679 } 3680 3681 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3682 3683 for (i = 0; i < (phba->num_cpus); i++) { 3684 q = &phwi_context->be_cq[i]; 3685 if (q->created) { 3686 be_queue_free(phba, q); 3687 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3688 } 3689 } 3690 3691 be_mcc_queues_destroy(phba); 3692 if (phba->pcidev->msix_enabled) 3693 eq_for_mcc = 1; 3694 else 3695 eq_for_mcc = 0; 3696 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3697 q = &phwi_context->be_eq[i].q; 3698 if (q->created) { 3699 be_queue_free(phba, q); 3700 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3701 } 3702 } 3703 /* this ensures complete FW cleanup */ 3704 beiscsi_cmd_function_reset(phba); 3705 /* last communication, indicate driver is unloading */ 3706 beiscsi_cmd_special_wrb(&phba->ctrl, 0); 3707 } 3708 3709 static int hwi_init_port(struct beiscsi_hba *phba) 3710 { 3711 struct hwi_controller *phwi_ctrlr; 3712 struct hwi_context_memory *phwi_context; 3713 unsigned int def_pdu_ring_sz; 3714 struct be_ctrl_info *ctrl = &phba->ctrl; 3715 int status, ulp_num; 3716 u16 nbufs; 3717 3718 phwi_ctrlr = phba->phwi_ctrlr; 3719 phwi_context = phwi_ctrlr->phwi_ctxt; 3720 /* set port optic state to unknown */ 3721 phba->optic_state = 0xff; 3722 3723 status = beiscsi_create_eqs(phba, phwi_context); 3724 if (status != 0) { 3725 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3726 "BM_%d : EQ not created\n"); 3727 goto error; 3728 } 3729 3730 status = be_mcc_queues_create(phba, phwi_context); 3731 if (status != 0) 3732 goto error; 3733 3734 status = beiscsi_check_supported_fw(ctrl, phba); 3735 if (status != 0) { 3736 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3737 "BM_%d : Unsupported fw version\n"); 3738 goto error; 3739 } 3740 3741 status = beiscsi_create_cqs(phba, phwi_context); 3742 if (status != 0) { 3743 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3744 "BM_%d : CQ not created\n"); 3745 goto error; 3746 } 3747 3748 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3749 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3750 nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries; 3751 def_pdu_ring_sz = nbufs * sizeof(struct phys_addr); 3752 3753 status = beiscsi_create_def_hdr(phba, phwi_context, 3754 phwi_ctrlr, 3755 def_pdu_ring_sz, 3756 ulp_num); 3757 if (status != 0) { 3758 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3759 "BM_%d : Default Header not created for ULP : %d\n", 3760 ulp_num); 3761 goto error; 3762 } 3763 3764 
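			/*
			 * Create the matching DATA ring; both default PDU
			 * rings of a ULP post their completions to be_cq[0].
			 */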
status = beiscsi_create_def_data(phba, phwi_context, 3765 phwi_ctrlr, 3766 def_pdu_ring_sz, 3767 ulp_num); 3768 if (status != 0) { 3769 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3770 "BM_%d : Default Data not created for ULP : %d\n", 3771 ulp_num); 3772 goto error; 3773 } 3774 /** 3775 * Now that the default PDU rings have been created, 3776 * let EP know about it. 3777 */ 3778 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, 3779 ulp_num, nbufs); 3780 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, 3781 ulp_num, nbufs); 3782 } 3783 } 3784 3785 status = beiscsi_post_pages(phba); 3786 if (status != 0) { 3787 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3788 "BM_%d : Post SGL Pages Failed\n"); 3789 goto error; 3790 } 3791 3792 status = beiscsi_post_template_hdr(phba); 3793 if (status != 0) { 3794 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3795 "BM_%d : Template HDR Posting for CXN Failed\n"); 3796 } 3797 3798 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3799 if (status != 0) { 3800 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3801 "BM_%d : WRB Rings not created\n"); 3802 goto error; 3803 } 3804 3805 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3806 uint16_t async_arr_idx = 0; 3807 3808 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3809 uint16_t cri = 0; 3810 struct hd_async_context *pasync_ctx; 3811 3812 pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3813 phwi_ctrlr, ulp_num); 3814 for (cri = 0; cri < 3815 phba->params.cxns_per_ctrl; cri++) { 3816 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI 3817 (phwi_ctrlr, cri)) 3818 pasync_ctx->cid_to_async_cri_map[ 3819 phwi_ctrlr->wrb_context[cri].cid] = 3820 async_arr_idx++; 3821 } 3822 } 3823 } 3824 3825 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3826 "BM_%d : hwi_init_port success\n"); 3827 return 0; 3828 3829 error: 3830 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3831 "BM_%d : hwi_init_port failed\n"); 3832 hwi_cleanup_port(phba); 3833 return status; 3834 } 3835 3836 static int hwi_init_controller(struct beiscsi_hba *phba) 3837 { 3838 struct hwi_controller *phwi_ctrlr; 3839 3840 phwi_ctrlr = phba->phwi_ctrlr; 3841 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3842 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3843 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3844 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3845 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3846 phwi_ctrlr->phwi_ctxt); 3847 } else { 3848 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3849 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3850 "than one element. Failing to load\n"); 3851 return -ENOMEM; 3852 } 3853 3854 iscsi_init_global_templates(phba); 3855 if (beiscsi_init_wrb_handle(phba)) 3856 return -ENOMEM; 3857 3858 if (hwi_init_async_pdu_ctx(phba)) { 3859 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3860 "BM_%d : hwi_init_async_pdu_ctx failed\n"); 3861 return -ENOMEM; 3862 } 3863 3864 if (hwi_init_port(phba) != 0) { 3865 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3866 "BM_%d : hwi_init_controller failed\n"); 3867 3868 return -ENOMEM; 3869 } 3870 return 0; 3871 } 3872 3873 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3874 { 3875 struct be_mem_descriptor *mem_descr; 3876 int i, j; 3877 3878 mem_descr = phba->init_mem; 3879 i = 0; 3880 j = 0; 3881 for (i = 0; i < SE_MEM_MAX; i++) { 3882 for (j = mem_descr->num_elements; j > 0; j--) { 3883 pci_free_consistent(phba->pcidev, 3884 mem_descr->mem_array[j - 1].size, 3885 mem_descr->mem_array[j - 1].virtual_address, 3886 (unsigned
long)mem_descr->mem_array[j - 1]. 3887 bus_address.u.a64.address); 3888 } 3889 3890 kfree(mem_descr->mem_array); 3891 mem_descr++; 3892 } 3893 kfree(phba->init_mem); 3894 kfree(phba->phwi_ctrlr->wrb_context); 3895 kfree(phba->phwi_ctrlr); 3896 } 3897 3898 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3899 { 3900 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3901 struct sgl_handle *psgl_handle; 3902 struct iscsi_sge *pfrag; 3903 unsigned int arr_index, i, idx; 3904 unsigned int ulp_icd_start, ulp_num = 0; 3905 3906 phba->io_sgl_hndl_avbl = 0; 3907 phba->eh_sgl_hndl_avbl = 0; 3908 3909 mem_descr_sglh = phba->init_mem; 3910 mem_descr_sglh += HWI_MEM_SGLH; 3911 if (1 == mem_descr_sglh->num_elements) { 3912 phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl, 3913 sizeof(struct sgl_handle *), 3914 GFP_KERNEL); 3915 if (!phba->io_sgl_hndl_base) { 3916 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3917 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3918 return -ENOMEM; 3919 } 3920 phba->eh_sgl_hndl_base = 3921 kcalloc(phba->params.icds_per_ctrl - 3922 phba->params.ios_per_ctrl, 3923 sizeof(struct sgl_handle *), GFP_KERNEL); 3924 if (!phba->eh_sgl_hndl_base) { 3925 kfree(phba->io_sgl_hndl_base); 3926 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3927 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3928 return -ENOMEM; 3929 } 3930 } else { 3931 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3932 "BM_%d : HWI_MEM_SGLH is more than one element. " 3933 "Failing to load\n"); 3934 return -ENOMEM; 3935 } 3936 3937 arr_index = 0; 3938 idx = 0; 3939 while (idx < mem_descr_sglh->num_elements) { 3940 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3941 3942 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3943 sizeof(struct sgl_handle)); i++) { 3944 if (arr_index < phba->params.ios_per_ctrl) { 3945 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3946 phba->io_sgl_hndl_avbl++; 3947 arr_index++; 3948 } else { 3949 phba->eh_sgl_hndl_base[arr_index - 3950 phba->params.ios_per_ctrl] = 3951 psgl_handle; 3952 arr_index++; 3953 phba->eh_sgl_hndl_avbl++; 3954 } 3955 psgl_handle++; 3956 } 3957 idx++; 3958 } 3959 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3960 "BM_%d : phba->io_sgl_hndl_avbl=%d " 3961 "phba->eh_sgl_hndl_avbl=%d\n", 3962 phba->io_sgl_hndl_avbl, 3963 phba->eh_sgl_hndl_avbl); 3964 3965 mem_descr_sg = phba->init_mem; 3966 mem_descr_sg += HWI_MEM_SGE; 3967 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3968 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 3969 mem_descr_sg->num_elements); 3970 3971 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3972 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3973 break; 3974 3975 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 3976 3977 arr_index = 0; 3978 idx = 0; 3979 while (idx < mem_descr_sg->num_elements) { 3980 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 3981 3982 for (i = 0; 3983 i < (mem_descr_sg->mem_array[idx].size) / 3984 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 3985 i++) { 3986 if (arr_index < phba->params.ios_per_ctrl) 3987 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 3988 else 3989 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 3990 phba->params.ios_per_ctrl]; 3991 psgl_handle->pfrag = pfrag; 3992 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 3993 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3994 pfrag += phba->params.num_sge_per_io; 3995 psgl_handle->sgl_index = ulp_icd_start + arr_index++; 3996 } 3997 idx++; 3998 }
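/* both the IO and EH SGL handle allocators start from index 0 */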
3999 phba->io_sgl_free_index = 0; 4000 phba->io_sgl_alloc_index = 0; 4001 phba->eh_sgl_free_index = 0; 4002 phba->eh_sgl_alloc_index = 0; 4003 return 0; 4004 } 4005 4006 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 4007 { 4008 int ret; 4009 uint16_t i, ulp_num; 4010 struct ulp_cid_info *ptr_cid_info = NULL; 4011 4012 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4013 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4014 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), 4015 GFP_KERNEL); 4016 4017 if (!ptr_cid_info) { 4018 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4019 "BM_%d : Failed to allocate memory " 4020 "for ULP_CID_INFO for ULP : %d\n", 4021 ulp_num); 4022 ret = -ENOMEM; 4023 goto free_memory; 4024 4025 } 4026 4027 /* Allocate memory for CID array */ 4028 ptr_cid_info->cid_array = 4029 kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num), 4030 sizeof(*ptr_cid_info->cid_array), 4031 GFP_KERNEL); 4032 if (!ptr_cid_info->cid_array) { 4033 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4034 "BM_%d : Failed to allocate memory " 4035 "for CID_ARRAY for ULP : %d\n", 4036 ulp_num); 4037 kfree(ptr_cid_info); 4038 ptr_cid_info = NULL; 4039 ret = -ENOMEM; 4040 4041 goto free_memory; 4042 } 4043 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( 4044 phba, ulp_num); 4045 4046 /* Save the cid_info_array ptr */ 4047 phba->cid_array_info[ulp_num] = ptr_cid_info; 4048 } 4049 } 4050 phba->ep_array = kcalloc(phba->params.cxns_per_ctrl, 4051 sizeof(struct iscsi_endpoint *), 4052 GFP_KERNEL); 4053 if (!phba->ep_array) { 4054 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4055 "BM_%d : Failed to allocate memory in " 4056 "hba_setup_cid_tbls\n"); 4057 ret = -ENOMEM; 4058 4059 goto free_memory; 4060 } 4061 4062 phba->conn_table = kcalloc(phba->params.cxns_per_ctrl, 4063 sizeof(struct beiscsi_conn *), 4064 GFP_KERNEL); 4065 if (!phba->conn_table) { 4066 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4067 "BM_%d : Failed to allocate memory in " 4068 "hba_setup_cid_tbls\n"); 4069 4070 kfree(phba->ep_array); 4071 phba->ep_array = NULL; 4072 ret = -ENOMEM; 4073 4074 goto free_memory; 4075 } 4076 4077 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4078 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4079 4080 ptr_cid_info = phba->cid_array_info[ulp_num]; 4081 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 4082 phba->phwi_ctrlr->wrb_context[i].cid; 4083 4084 } 4085 4086 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4087 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4088 ptr_cid_info = phba->cid_array_info[ulp_num]; 4089 4090 ptr_cid_info->cid_alloc = 0; 4091 ptr_cid_info->cid_free = 0; 4092 } 4093 } 4094 return 0; 4095 4096 free_memory: 4097 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4098 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4099 ptr_cid_info = phba->cid_array_info[ulp_num]; 4100 4101 if (ptr_cid_info) { 4102 kfree(ptr_cid_info->cid_array); 4103 kfree(ptr_cid_info); 4104 phba->cid_array_info[ulp_num] = NULL; 4105 } 4106 } 4107 } 4108 4109 return ret; 4110 } 4111 4112 static void hwi_enable_intr(struct beiscsi_hba *phba) 4113 { 4114 struct be_ctrl_info *ctrl = &phba->ctrl; 4115 struct hwi_controller *phwi_ctrlr; 4116 struct hwi_context_memory *phwi_context; 4117 struct be_queue_info *eq; 4118 u8 __iomem *addr; 4119 u32 reg, i; 4120 u32 enabled; 4121 4122 phwi_ctrlr = phba->phwi_ctrlr; 4123 phwi_context = phwi_ctrlr->phwi_ctxt; 4124 4125 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
4126 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 4127 reg = ioread32(addr); 4128 4129 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4130 if (!enabled) { 4131 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4132 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4133 "BM_%d : reg=0x%08x addr=%p\n", reg, addr); 4134 iowrite32(reg, addr); 4135 } 4136 4137 if (!phba->pcidev->msix_enabled) { 4138 eq = &phwi_context->be_eq[0].q; 4139 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4140 "BM_%d : eq->id=%d\n", eq->id); 4141 4142 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4143 } else { 4144 for (i = 0; i <= phba->num_cpus; i++) { 4145 eq = &phwi_context->be_eq[i].q; 4146 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4147 "BM_%d : eq->id=%d\n", eq->id); 4148 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4149 } 4150 } 4151 } 4152 4153 static void hwi_disable_intr(struct beiscsi_hba *phba) 4154 { 4155 struct be_ctrl_info *ctrl = &phba->ctrl; 4156 4157 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 4158 u32 reg = ioread32(addr); 4159 4160 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4161 if (enabled) { 4162 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4163 iowrite32(reg, addr); 4164 } else 4165 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4166 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 4167 } 4168 4169 static int beiscsi_init_port(struct beiscsi_hba *phba) 4170 { 4171 int ret; 4172 4173 ret = hwi_init_controller(phba); 4174 if (ret < 0) { 4175 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4176 "BM_%d : init controller failed\n"); 4177 return ret; 4178 } 4179 ret = beiscsi_init_sgl_handle(phba); 4180 if (ret < 0) { 4181 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4182 "BM_%d : init sgl handles failed\n"); 4183 goto cleanup_port; 4184 } 4185 4186 ret = hba_setup_cid_tbls(phba); 4187 if (ret < 0) { 4188 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4189 "BM_%d : setup CID table failed\n"); 4190 kfree(phba->io_sgl_hndl_base); 4191 kfree(phba->eh_sgl_hndl_base); 4192 goto cleanup_port; 4193 } 4194 return ret; 4195 4196 cleanup_port: 4197 hwi_cleanup_port(phba); 4198 return ret; 4199 } 4200 4201 static void beiscsi_cleanup_port(struct beiscsi_hba *phba) 4202 { 4203 struct ulp_cid_info *ptr_cid_info = NULL; 4204 int ulp_num; 4205 4206 kfree(phba->io_sgl_hndl_base); 4207 kfree(phba->eh_sgl_hndl_base); 4208 kfree(phba->ep_array); 4209 kfree(phba->conn_table); 4210 4211 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4212 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4213 ptr_cid_info = phba->cid_array_info[ulp_num]; 4214 4215 if (ptr_cid_info) { 4216 kfree(ptr_cid_info->cid_array); 4217 kfree(ptr_cid_info); 4218 phba->cid_array_info[ulp_num] = NULL; 4219 } 4220 } 4221 } 4222 } 4223 4224 /** 4225 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources 4226 * @beiscsi_conn: ptr to the conn to be cleaned up 4227 * @task: ptr to iscsi_task resource to be freed. 4228 * 4229 * Free driver mgmt resources bound to CXN.
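* Releases the WRB handle, the mgmt SGL handle and any DMA mapping held by the task.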
4230 **/ 4231 void 4232 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4233 struct iscsi_task *task) 4234 { 4235 struct beiscsi_io_task *io_task; 4236 struct beiscsi_hba *phba = beiscsi_conn->phba; 4237 struct hwi_wrb_context *pwrb_context; 4238 struct hwi_controller *phwi_ctrlr; 4239 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4240 beiscsi_conn->beiscsi_conn_cid); 4241 4242 phwi_ctrlr = phba->phwi_ctrlr; 4243 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4244 4245 io_task = task->dd_data; 4246 4247 if (io_task->pwrb_handle) { 4248 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4249 io_task->pwrb_handle = NULL; 4250 } 4251 4252 if (io_task->psgl_handle) { 4253 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4254 io_task->psgl_handle = NULL; 4255 } 4256 4257 if (io_task->mtask_addr) { 4258 pci_unmap_single(phba->pcidev, 4259 io_task->mtask_addr, 4260 io_task->mtask_data_count, 4261 PCI_DMA_TODEVICE); 4262 io_task->mtask_addr = 0; 4263 } 4264 } 4265 4266 /** 4267 * beiscsi_cleanup_task()- Free driver resources of the task 4268 * @task: ptr to the iscsi task 4269 * 4270 **/ 4271 static void beiscsi_cleanup_task(struct iscsi_task *task) 4272 { 4273 struct beiscsi_io_task *io_task = task->dd_data; 4274 struct iscsi_conn *conn = task->conn; 4275 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4276 struct beiscsi_hba *phba = beiscsi_conn->phba; 4277 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4278 struct hwi_wrb_context *pwrb_context; 4279 struct hwi_controller *phwi_ctrlr; 4280 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4281 beiscsi_conn->beiscsi_conn_cid); 4282 4283 phwi_ctrlr = phba->phwi_ctrlr; 4284 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4285 4286 if (io_task->cmd_bhs) { 4287 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4288 io_task->bhs_pa.u.a64.address); 4289 io_task->cmd_bhs = NULL; 4290 task->hdr = NULL; 4291 } 4292 4293 if (task->sc) { 4294 if (io_task->pwrb_handle) { 4295 free_wrb_handle(phba, pwrb_context, 4296 io_task->pwrb_handle); 4297 io_task->pwrb_handle = NULL; 4298 } 4299 4300 if (io_task->psgl_handle) { 4301 free_io_sgl_handle(phba, io_task->psgl_handle); 4302 io_task->psgl_handle = NULL; 4303 } 4304 4305 if (io_task->scsi_cmnd) { 4306 if (io_task->num_sg) 4307 scsi_dma_unmap(io_task->scsi_cmnd); 4308 io_task->scsi_cmnd = NULL; 4309 } 4310 } else { 4311 if (!beiscsi_conn->login_in_progress) 4312 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4313 } 4314 } 4315 4316 void 4317 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4318 struct beiscsi_offload_params *params) 4319 { 4320 struct wrb_handle *pwrb_handle; 4321 struct hwi_wrb_context *pwrb_context = NULL; 4322 struct beiscsi_hba *phba = beiscsi_conn->phba; 4323 struct iscsi_task *task = beiscsi_conn->task; 4324 struct iscsi_session *session = task->conn->session; 4325 u32 doorbell = 0; 4326 4327 /* 4328 * We can always use 0 here because it is reserved by libiscsi for 4329 * login/startup related tasks. 
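* Release the login task's handles here before allocating the WRB for the connection offload.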
4330 */ 4331 beiscsi_conn->login_in_progress = 0; 4332 spin_lock_bh(&session->back_lock); 4333 beiscsi_cleanup_task(task); 4334 spin_unlock_bh(&session->back_lock); 4335 4336 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 4337 &pwrb_context); 4338 4339 /* Check for the adapter family */ 4340 if (is_chip_be2_be3r(phba)) 4341 beiscsi_offload_cxn_v0(params, pwrb_handle, 4342 phba->init_mem, 4343 pwrb_context); 4344 else 4345 beiscsi_offload_cxn_v2(params, pwrb_handle, 4346 pwrb_context); 4347 4348 be_dws_le_to_cpu(pwrb_handle->pwrb, 4349 sizeof(struct iscsi_target_context_update_wrb)); 4350 4351 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4352 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4353 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4354 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4355 iowrite32(doorbell, phba->db_va + 4356 beiscsi_conn->doorbell_offset); 4357 4358 /* 4359 * There is no completion for CONTEXT_UPDATE. The completion of the next 4360 * WRB posted guarantees FW's processing and DMA'ing of it. 4361 * Use beiscsi_put_wrb_handle to put it back in the pool so that the 4362 * WRB is zeroed or reused only after wrbs_per_cxn more WRBs are posted. 4363 */ 4364 beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, 4365 phba->params.wrbs_per_cxn); 4366 beiscsi_log(phba, KERN_INFO, 4367 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4368 "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n", 4369 pwrb_handle, pwrb_context->free_index, 4370 pwrb_context->wrb_handles_available); 4371 } 4372 4373 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4374 int *index, int *age) 4375 { 4376 *index = (int)itt; 4377 if (age) 4378 *age = conn->session->age; 4379 } 4380 4381 /** 4382 * beiscsi_alloc_pdu - allocates pdu and related resources 4383 * @task: libiscsi task 4384 * @opcode: opcode of pdu for task 4385 * 4386 * This is called with the session lock held. It allocates 4387 * the WRB and SGL if needed for the command and preps 4388 * the PDU's itt. beiscsi_parse_pdu later translates 4389 * the PDU itt to the libiscsi task itt.
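* Returns 0 on success and -ENOMEM if the BHS, SGL handle or WRB handle allocation fails.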
4390 */ 4391 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4392 { 4393 struct beiscsi_io_task *io_task = task->dd_data; 4394 struct iscsi_conn *conn = task->conn; 4395 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4396 struct beiscsi_hba *phba = beiscsi_conn->phba; 4397 struct hwi_wrb_context *pwrb_context; 4398 struct hwi_controller *phwi_ctrlr; 4399 itt_t itt; 4400 uint16_t cri_index = 0; 4401 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4402 dma_addr_t paddr; 4403 4404 io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool, 4405 GFP_ATOMIC, &paddr); 4406 if (!io_task->cmd_bhs) 4407 return -ENOMEM; 4408 io_task->bhs_pa.u.a64.address = paddr; 4409 io_task->libiscsi_itt = (itt_t)task->itt; 4410 io_task->conn = beiscsi_conn; 4411 4412 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4413 task->hdr_max = sizeof(struct be_cmd_bhs); 4414 io_task->psgl_handle = NULL; 4415 io_task->pwrb_handle = NULL; 4416 4417 if (task->sc) { 4418 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4419 if (!io_task->psgl_handle) { 4420 beiscsi_log(phba, KERN_ERR, 4421 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4422 "BM_%d : Alloc of IO_SGL_ICD Failed " 4423 "for the CID : %d\n", 4424 beiscsi_conn->beiscsi_conn_cid); 4425 goto free_hndls; 4426 } 4427 io_task->pwrb_handle = alloc_wrb_handle(phba, 4428 beiscsi_conn->beiscsi_conn_cid, 4429 &io_task->pwrb_context); 4430 if (!io_task->pwrb_handle) { 4431 beiscsi_log(phba, KERN_ERR, 4432 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4433 "BM_%d : Alloc of WRB_HANDLE Failed " 4434 "for the CID : %d\n", 4435 beiscsi_conn->beiscsi_conn_cid); 4436 goto free_io_hndls; 4437 } 4438 } else { 4439 io_task->scsi_cmnd = NULL; 4440 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4441 beiscsi_conn->task = task; 4442 if (!beiscsi_conn->login_in_progress) { 4443 io_task->psgl_handle = (struct sgl_handle *) 4444 alloc_mgmt_sgl_handle(phba); 4445 if (!io_task->psgl_handle) { 4446 beiscsi_log(phba, KERN_ERR, 4447 BEISCSI_LOG_IO | 4448 BEISCSI_LOG_CONFIG, 4449 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4450 "for the CID : %d\n", 4451 beiscsi_conn-> 4452 beiscsi_conn_cid); 4453 goto free_hndls; 4454 } 4455 4456 beiscsi_conn->login_in_progress = 1; 4457 beiscsi_conn->plogin_sgl_handle = 4458 io_task->psgl_handle; 4459 io_task->pwrb_handle = 4460 alloc_wrb_handle(phba, 4461 beiscsi_conn->beiscsi_conn_cid, 4462 &io_task->pwrb_context); 4463 if (!io_task->pwrb_handle) { 4464 beiscsi_log(phba, KERN_ERR, 4465 BEISCSI_LOG_IO | 4466 BEISCSI_LOG_CONFIG, 4467 "BM_%d : Alloc of WRB_HANDLE Failed " 4468 "for the CID : %d\n", 4469 beiscsi_conn-> 4470 beiscsi_conn_cid); 4471 goto free_mgmt_hndls; 4472 } 4473 beiscsi_conn->plogin_wrb_handle = 4474 io_task->pwrb_handle; 4475 4476 } else { 4477 io_task->psgl_handle = 4478 beiscsi_conn->plogin_sgl_handle; 4479 io_task->pwrb_handle = 4480 beiscsi_conn->plogin_wrb_handle; 4481 } 4482 } else { 4483 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4484 if (!io_task->psgl_handle) { 4485 beiscsi_log(phba, KERN_ERR, 4486 BEISCSI_LOG_IO | 4487 BEISCSI_LOG_CONFIG, 4488 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4489 "for the CID : %d\n", 4490 beiscsi_conn-> 4491 beiscsi_conn_cid); 4492 goto free_hndls; 4493 } 4494 io_task->pwrb_handle = 4495 alloc_wrb_handle(phba, 4496 beiscsi_conn->beiscsi_conn_cid, 4497 &io_task->pwrb_context); 4498 if (!io_task->pwrb_handle) { 4499 beiscsi_log(phba, KERN_ERR, 4500 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4501 "BM_%d : Alloc of WRB_HANDLE Failed " 4502 "for the CID : %d\n", 4503
beiscsi_conn->beiscsi_conn_cid); 4504 goto free_mgmt_hndls; 4505 } 4506 4507 } 4508 } 4509 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4510 wrb_index << 16) | (unsigned int) 4511 (io_task->psgl_handle->sgl_index)); 4512 io_task->pwrb_handle->pio_handle = task; 4513 4514 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4515 return 0; 4516 4517 free_io_hndls: 4518 free_io_sgl_handle(phba, io_task->psgl_handle); 4519 goto free_hndls; 4520 free_mgmt_hndls: 4521 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4522 io_task->psgl_handle = NULL; 4523 free_hndls: 4524 phwi_ctrlr = phba->phwi_ctrlr; 4525 cri_index = BE_GET_CRI_FROM_CID( 4526 beiscsi_conn->beiscsi_conn_cid); 4527 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4528 if (io_task->pwrb_handle) 4529 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4530 io_task->pwrb_handle = NULL; 4531 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4532 io_task->bhs_pa.u.a64.address); 4533 io_task->cmd_bhs = NULL; 4534 return -ENOMEM; 4535 } 4536 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4537 unsigned int num_sg, unsigned int xferlen, 4538 unsigned int writedir) 4539 { 4540 4541 struct beiscsi_io_task *io_task = task->dd_data; 4542 struct iscsi_conn *conn = task->conn; 4543 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4544 struct beiscsi_hba *phba = beiscsi_conn->phba; 4545 struct iscsi_wrb *pwrb = NULL; 4546 unsigned int doorbell = 0; 4547 4548 pwrb = io_task->pwrb_handle->pwrb; 4549 4550 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4551 4552 if (writedir) { 4553 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4554 INI_WR_CMD); 4555 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4556 } else { 4557 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4558 INI_RD_CMD); 4559 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4560 } 4561 4562 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4563 type, pwrb); 4564 4565 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4566 cpu_to_be16(*(unsigned short *) 4567 &io_task->cmd_bhs->iscsi_hdr.lun)); 4568 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4569 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4570 io_task->pwrb_handle->wrb_index); 4571 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4572 be32_to_cpu(task->cmdsn)); 4573 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4574 io_task->psgl_handle->sgl_index); 4575 4576 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4577 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4578 io_task->pwrb_handle->wrb_index); 4579 if (io_task->pwrb_context->plast_wrb) 4580 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4581 io_task->pwrb_context->plast_wrb, 4582 io_task->pwrb_handle->wrb_index); 4583 io_task->pwrb_context->plast_wrb = pwrb; 4584 4585 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4586 4587 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4588 doorbell |= (io_task->pwrb_handle->wrb_index & 4589 DB_DEF_PDU_WRB_INDEX_MASK) << 4590 DB_DEF_PDU_WRB_INDEX_SHIFT; 4591 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4592 iowrite32(doorbell, phba->db_va + 4593 beiscsi_conn->doorbell_offset); 4594 return 0; 4595 } 4596 4597 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4598 unsigned int num_sg, unsigned int xferlen, 4599 unsigned int writedir) 4600 { 4601 4602 struct beiscsi_io_task *io_task = task->dd_data; 4603 struct iscsi_conn *conn = task->conn; 4604 struct 
beiscsi_conn *beiscsi_conn = conn->dd_data; 4605 struct beiscsi_hba *phba = beiscsi_conn->phba; 4606 struct iscsi_wrb *pwrb = NULL; 4607 unsigned int doorbell = 0; 4608 4609 pwrb = io_task->pwrb_handle->pwrb; 4610 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4611 4612 if (writedir) { 4613 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4614 INI_WR_CMD); 4615 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4616 } else { 4617 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4618 INI_RD_CMD); 4619 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4620 } 4621 4622 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4623 type, pwrb); 4624 4625 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4626 cpu_to_be16(*(unsigned short *) 4627 &io_task->cmd_bhs->iscsi_hdr.lun)); 4628 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4629 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4630 io_task->pwrb_handle->wrb_index); 4631 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4632 be32_to_cpu(task->cmdsn)); 4633 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4634 io_task->psgl_handle->sgl_index); 4635 4636 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4637 4638 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4639 io_task->pwrb_handle->wrb_index); 4640 if (io_task->pwrb_context->plast_wrb) 4641 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4642 io_task->pwrb_context->plast_wrb, 4643 io_task->pwrb_handle->wrb_index); 4644 io_task->pwrb_context->plast_wrb = pwrb; 4645 4646 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4647 4648 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4649 doorbell |= (io_task->pwrb_handle->wrb_index & 4650 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4651 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4652 4653 iowrite32(doorbell, phba->db_va + 4654 beiscsi_conn->doorbell_offset); 4655 return 0; 4656 } 4657 4658 static int beiscsi_mtask(struct iscsi_task *task) 4659 { 4660 struct beiscsi_io_task *io_task = task->dd_data; 4661 struct iscsi_conn *conn = task->conn; 4662 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4663 struct beiscsi_hba *phba = beiscsi_conn->phba; 4664 struct iscsi_wrb *pwrb = NULL; 4665 unsigned int doorbell = 0; 4666 unsigned int cid; 4667 unsigned int pwrb_typeoffset = 0; 4668 int ret = 0; 4669 4670 cid = beiscsi_conn->beiscsi_conn_cid; 4671 pwrb = io_task->pwrb_handle->pwrb; 4672 4673 if (is_chip_be2_be3r(phba)) { 4674 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4675 be32_to_cpu(task->cmdsn)); 4676 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4677 io_task->pwrb_handle->wrb_index); 4678 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4679 io_task->psgl_handle->sgl_index); 4680 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4681 task->data_count); 4682 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4683 io_task->pwrb_handle->wrb_index); 4684 if (io_task->pwrb_context->plast_wrb) 4685 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4686 io_task->pwrb_context->plast_wrb, 4687 io_task->pwrb_handle->wrb_index); 4688 io_task->pwrb_context->plast_wrb = pwrb; 4689 4690 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4691 } else { 4692 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4693 be32_to_cpu(task->cmdsn)); 4694 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4695 io_task->pwrb_handle->wrb_index); 4696 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4697 io_task->psgl_handle->sgl_index); 4698 AMAP_SET_BITS(struct 
amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 4699 task->data_count); 4700 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4701 io_task->pwrb_handle->wrb_index); 4702 if (io_task->pwrb_context->plast_wrb) 4703 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4704 io_task->pwrb_context->plast_wrb, 4705 io_task->pwrb_handle->wrb_index); 4706 io_task->pwrb_context->plast_wrb = pwrb; 4707 4708 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 4709 } 4710 4711 4712 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 4713 case ISCSI_OP_LOGIN: 4714 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 4715 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4716 ret = hwi_write_buffer(pwrb, task); 4717 break; 4718 case ISCSI_OP_NOOP_OUT: 4719 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4720 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4721 if (is_chip_be2_be3r(phba)) 4722 AMAP_SET_BITS(struct amap_iscsi_wrb, 4723 dmsg, pwrb, 1); 4724 else 4725 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4726 dmsg, pwrb, 1); 4727 } else { 4728 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4729 if (is_chip_be2_be3r(phba)) 4730 AMAP_SET_BITS(struct amap_iscsi_wrb, 4731 dmsg, pwrb, 0); 4732 else 4733 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4734 dmsg, pwrb, 0); 4735 } 4736 ret = hwi_write_buffer(pwrb, task); 4737 break; 4738 case ISCSI_OP_TEXT: 4739 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4740 ret = hwi_write_buffer(pwrb, task); 4741 break; 4742 case ISCSI_OP_SCSI_TMFUNC: 4743 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 4744 ret = hwi_write_buffer(pwrb, task); 4745 break; 4746 case ISCSI_OP_LOGOUT: 4747 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 4748 ret = hwi_write_buffer(pwrb, task); 4749 break; 4750 4751 default: 4752 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4753 "BM_%d : opcode =%d Not supported\n", 4754 task->hdr->opcode & ISCSI_OPCODE_MASK); 4755 4756 return -EINVAL; 4757 } 4758 4759 if (ret) 4760 return ret; 4761 4762 /* Set the task type */ 4763 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 4764 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 4765 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 4766 4767 doorbell |= cid & DB_WRB_POST_CID_MASK; 4768 doorbell |= (io_task->pwrb_handle->wrb_index & 4769 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4770 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4771 iowrite32(doorbell, phba->db_va + 4772 beiscsi_conn->doorbell_offset); 4773 return 0; 4774 } 4775 4776 static int beiscsi_task_xmit(struct iscsi_task *task) 4777 { 4778 struct beiscsi_io_task *io_task = task->dd_data; 4779 struct scsi_cmnd *sc = task->sc; 4780 struct beiscsi_hba *phba; 4781 struct scatterlist *sg; 4782 int num_sg; 4783 unsigned int writedir = 0, xferlen = 0; 4784 4785 phba = io_task->conn->phba; 4786 /** 4787 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be 4788 * operational if FW still gets heartbeat from EP FW. Is management 4789 * path really needed to continue further? 
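* For now the transmit is failed here and libiscsi error handling takes over.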
4790 */ 4791 if (!beiscsi_hba_is_online(phba)) 4792 return -EIO; 4793 4794 if (!io_task->conn->login_in_progress) 4795 task->hdr->exp_statsn = 0; 4796 4797 if (!sc) 4798 return beiscsi_mtask(task); 4799 4800 io_task->scsi_cmnd = sc; 4801 io_task->num_sg = 0; 4802 num_sg = scsi_dma_map(sc); 4803 if (num_sg < 0) { 4804 beiscsi_log(phba, KERN_ERR, 4805 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 4806 "BM_%d : scsi_dma_map Failed " 4807 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 4808 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 4809 io_task->libiscsi_itt, scsi_bufflen(sc)); 4810 4811 return num_sg; 4812 } 4813 /** 4814 * For scsi cmd task, check num_sg before unmapping in cleanup_task. 4815 * For management task, cleanup_task checks mtask_addr before unmapping. 4816 */ 4817 io_task->num_sg = num_sg; 4818 xferlen = scsi_bufflen(sc); 4819 sg = scsi_sglist(sc); 4820 if (sc->sc_data_direction == DMA_TO_DEVICE) 4821 writedir = 1; 4822 else 4823 writedir = 0; 4824 4825 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 4826 } 4827 4828 /** 4829 * beiscsi_bsg_request - handle bsg request from ISCSI transport 4830 * @job: job to handle 4831 */ 4832 static int beiscsi_bsg_request(struct bsg_job *job) 4833 { 4834 struct Scsi_Host *shost; 4835 struct beiscsi_hba *phba; 4836 struct iscsi_bsg_request *bsg_req = job->request; 4837 int rc = -EINVAL; 4838 unsigned int tag; 4839 struct be_dma_mem nonemb_cmd; 4840 struct be_cmd_resp_hdr *resp; 4841 struct iscsi_bsg_reply *bsg_reply = job->reply; 4842 unsigned short status, extd_status; 4843 4844 shost = iscsi_job_to_shost(job); 4845 phba = iscsi_host_priv(shost); 4846 4847 if (!beiscsi_hba_is_online(phba)) { 4848 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 4849 "BM_%d : HBA in error 0x%lx\n", phba->state); 4850 return -ENXIO; 4851 } 4852 4853 switch (bsg_req->msgcode) { 4854 case ISCSI_BSG_HST_VENDOR: 4855 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 4856 job->request_payload.payload_len, 4857 &nonemb_cmd.dma); 4858 if (nonemb_cmd.va == NULL) { 4859 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4860 "BM_%d : Failed to allocate memory for " 4861 "beiscsi_bsg_request\n"); 4862 return -ENOMEM; 4863 } /* record the size so the pci_free_consistent calls below see it */ nonemb_cmd.size = job->request_payload.payload_len; 4864 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4865 &nonemb_cmd); 4866 if (!tag) { 4867 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4868 "BM_%d : MBX Tag Allocation Failed\n"); 4869 4870 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4871 nonemb_cmd.va, nonemb_cmd.dma); 4872 return -EAGAIN; 4873 } 4874 4875 rc = wait_event_interruptible_timeout( 4876 phba->ctrl.mcc_wait[tag], 4877 phba->ctrl.mcc_tag_status[tag], 4878 msecs_to_jiffies( 4879 BEISCSI_HOST_MBX_TIMEOUT)); 4880 4881 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 4882 clear_bit(MCC_TAG_STATE_RUNNING, 4883 &phba->ctrl.ptag_state[tag].tag_state); 4884 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4885 nonemb_cmd.va, nonemb_cmd.dma); 4886 return -EIO; 4887 } 4888 extd_status = (phba->ctrl.mcc_tag_status[tag] & 4889 CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; 4890 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; 4891 free_mcc_wrb(&phba->ctrl, tag); 4892 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 4893 sg_copy_from_buffer(job->reply_payload.sg_list, 4894 job->reply_payload.sg_cnt, 4895 nonemb_cmd.va, (resp->response_length 4896 + sizeof(*resp))); 4897 bsg_reply->reply_payload_rcv_len = resp->response_length; 4898 bsg_reply->result = status; 4899 bsg_job_done(job, bsg_reply->result, 4900 bsg_reply->reply_payload_rcv_len); 4901
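/* reply data has been copied out above; release the non-embedded command buffer */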
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4902 nonemb_cmd.va, nonemb_cmd.dma); 4903 if (status || extd_status) { 4904 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4905 "BM_%d : MBX Cmd Failed" 4906 " status = %d extd_status = %d\n", 4907 status, extd_status); 4908 4909 return -EIO; 4910 } else { 4911 rc = 0; 4912 } 4913 break; 4914 4915 default: 4916 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4917 "BM_%d : Unsupported bsg command: 0x%x\n", 4918 bsg_req->msgcode); 4919 break; 4920 } 4921 4922 return rc; 4923 } 4924 4925 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 4926 { 4927 /* Set the logging parameter */ 4928 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4929 } 4930 4931 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle) 4932 { 4933 if (phba->boot_struct.boot_kset) 4934 return; 4935 4936 /* skip if boot work is already in progress */ 4937 if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) 4938 return; 4939 4940 phba->boot_struct.retry = 3; 4941 phba->boot_struct.tag = 0; 4942 phba->boot_struct.s_handle = s_handle; 4943 phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE; 4944 schedule_work(&phba->boot_work); 4945 } 4946 4947 /** 4948 * Boot flag info for iscsi-utilities 4949 * Bit 0 Block valid flag 4950 * Bit 1 Firmware booting selected 4951 */ 4952 #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3 4953 4954 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 4955 { 4956 struct beiscsi_hba *phba = data; 4957 struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess; 4958 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 4959 char *str = buf; 4960 int rc = -EPERM; 4961 4962 switch (type) { 4963 case ISCSI_BOOT_TGT_NAME: 4964 rc = sprintf(buf, "%.*s\n", 4965 (int)strlen(boot_sess->target_name), 4966 (char *)&boot_sess->target_name); 4967 break; 4968 case ISCSI_BOOT_TGT_IP_ADDR: 4969 if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4) 4970 rc = sprintf(buf, "%pI4\n", 4971 (char *)&boot_conn->dest_ipaddr.addr); 4972 else 4973 rc = sprintf(str, "%pI6\n", 4974 (char *)&boot_conn->dest_ipaddr.addr); 4975 break; 4976 case ISCSI_BOOT_TGT_PORT: 4977 rc = sprintf(str, "%d\n", boot_conn->dest_port); 4978 break; 4979 4980 case ISCSI_BOOT_TGT_CHAP_NAME: 4981 rc = sprintf(str, "%.*s\n", 4982 boot_conn->negotiated_login_options.auth_data.chap. 4983 target_chap_name_length, 4984 (char *)&boot_conn->negotiated_login_options. 4985 auth_data.chap.target_chap_name); 4986 break; 4987 case ISCSI_BOOT_TGT_CHAP_SECRET: 4988 rc = sprintf(str, "%.*s\n", 4989 boot_conn->negotiated_login_options.auth_data.chap. 4990 target_secret_length, 4991 (char *)&boot_conn->negotiated_login_options. 4992 auth_data.chap.target_secret); 4993 break; 4994 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 4995 rc = sprintf(str, "%.*s\n", 4996 boot_conn->negotiated_login_options.auth_data.chap. 4997 intr_chap_name_length, 4998 (char *)&boot_conn->negotiated_login_options. 4999 auth_data.chap.intr_chap_name); 5000 break; 5001 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5002 rc = sprintf(str, "%.*s\n", 5003 boot_conn->negotiated_login_options.auth_data.chap. 5004 intr_secret_length, 5005 (char *)&boot_conn->negotiated_login_options. 
5006 auth_data.chap.intr_secret); 5007 break; 5008 case ISCSI_BOOT_TGT_FLAGS: 5009 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 5010 break; 5011 case ISCSI_BOOT_TGT_NIC_ASSOC: 5012 rc = sprintf(str, "0\n"); 5013 break; 5014 } 5015 return rc; 5016 } 5017 5018 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 5019 { 5020 struct beiscsi_hba *phba = data; 5021 char *str = buf; 5022 int rc = -EPERM; 5023 5024 switch (type) { 5025 case ISCSI_BOOT_INI_INITIATOR_NAME: 5026 rc = sprintf(str, "%s\n", 5027 phba->boot_struct.boot_sess.initiator_iscsiname); 5028 break; 5029 } 5030 return rc; 5031 } 5032 5033 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 5034 { 5035 struct beiscsi_hba *phba = data; 5036 char *str = buf; 5037 int rc = -EPERM; 5038 5039 switch (type) { 5040 case ISCSI_BOOT_ETH_FLAGS: 5041 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 5042 break; 5043 case ISCSI_BOOT_ETH_INDEX: 5044 rc = sprintf(str, "0\n"); 5045 break; 5046 case ISCSI_BOOT_ETH_MAC: 5047 rc = beiscsi_get_macaddr(str, phba); 5048 break; 5049 } 5050 return rc; 5051 } 5052 5053 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 5054 { 5055 umode_t rc = 0; 5056 5057 switch (type) { 5058 case ISCSI_BOOT_TGT_NAME: 5059 case ISCSI_BOOT_TGT_IP_ADDR: 5060 case ISCSI_BOOT_TGT_PORT: 5061 case ISCSI_BOOT_TGT_CHAP_NAME: 5062 case ISCSI_BOOT_TGT_CHAP_SECRET: 5063 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5064 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5065 case ISCSI_BOOT_TGT_NIC_ASSOC: 5066 case ISCSI_BOOT_TGT_FLAGS: 5067 rc = S_IRUGO; 5068 break; 5069 } 5070 return rc; 5071 } 5072 5073 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 5074 { 5075 umode_t rc = 0; 5076 5077 switch (type) { 5078 case ISCSI_BOOT_INI_INITIATOR_NAME: 5079 rc = S_IRUGO; 5080 break; 5081 } 5082 return rc; 5083 } 5084 5085 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 5086 { 5087 umode_t rc = 0; 5088 5089 switch (type) { 5090 case ISCSI_BOOT_ETH_FLAGS: 5091 case ISCSI_BOOT_ETH_MAC: 5092 case ISCSI_BOOT_ETH_INDEX: 5093 rc = S_IRUGO; 5094 break; 5095 } 5096 return rc; 5097 } 5098 5099 static void beiscsi_boot_kobj_release(void *data) 5100 { 5101 struct beiscsi_hba *phba = data; 5102 5103 scsi_host_put(phba->shost); 5104 } 5105 5106 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba) 5107 { 5108 struct boot_struct *bs = &phba->boot_struct; 5109 struct iscsi_boot_kobj *boot_kobj; 5110 5111 if (bs->boot_kset) { 5112 __beiscsi_log(phba, KERN_ERR, 5113 "BM_%d: boot_kset already created\n"); 5114 return 0; 5115 } 5116 5117 bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 5118 if (!bs->boot_kset) { 5119 __beiscsi_log(phba, KERN_ERR, 5120 "BM_%d: boot_kset alloc failed\n"); 5121 return -ENOMEM; 5122 } 5123 5124 /* get shost ref because the show function will refer phba */ 5125 if (!scsi_host_get(phba->shost)) 5126 goto free_kset; 5127 5128 boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba, 5129 beiscsi_show_boot_tgt_info, 5130 beiscsi_tgt_get_attr_visibility, 5131 beiscsi_boot_kobj_release); 5132 if (!boot_kobj) 5133 goto put_shost; 5134 5135 if (!scsi_host_get(phba->shost)) 5136 goto free_kset; 5137 5138 boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba, 5139 beiscsi_show_boot_ini_info, 5140 beiscsi_ini_get_attr_visibility, 5141 beiscsi_boot_kobj_release); 5142 if (!boot_kobj) 5143 goto put_shost; 5144 5145 if (!scsi_host_get(phba->shost)) 5146 goto free_kset; 5147 5148 boot_kobj = 
iscsi_boot_create_ethernet(bs->boot_kset, 0, phba, 5149 beiscsi_show_boot_eth_info, 5150 beiscsi_eth_get_attr_visibility, 5151 beiscsi_boot_kobj_release); 5152 if (!boot_kobj) 5153 goto put_shost; 5154 5155 return 0; 5156 5157 put_shost: 5158 scsi_host_put(phba->shost); 5159 free_kset: 5160 iscsi_boot_destroy_kset(bs->boot_kset); 5161 bs->boot_kset = NULL; 5162 return -ENOMEM; 5163 } 5164 5165 static void beiscsi_boot_work(struct work_struct *work) 5166 { 5167 struct beiscsi_hba *phba = 5168 container_of(work, struct beiscsi_hba, boot_work); 5169 struct boot_struct *bs = &phba->boot_struct; 5170 unsigned int tag = 0; 5171 5172 if (!beiscsi_hba_is_online(phba)) 5173 return; 5174 5175 beiscsi_log(phba, KERN_INFO, 5176 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 5177 "BM_%d : %s action %d\n", 5178 __func__, phba->boot_struct.action); 5179 5180 switch (phba->boot_struct.action) { 5181 case BEISCSI_BOOT_REOPEN_SESS: 5182 tag = beiscsi_boot_reopen_sess(phba); 5183 break; 5184 case BEISCSI_BOOT_GET_SHANDLE: 5185 tag = __beiscsi_boot_get_shandle(phba, 1); 5186 break; 5187 case BEISCSI_BOOT_GET_SINFO: 5188 tag = beiscsi_boot_get_sinfo(phba); 5189 break; 5190 case BEISCSI_BOOT_LOGOUT_SESS: 5191 tag = beiscsi_boot_logout_sess(phba); 5192 break; 5193 case BEISCSI_BOOT_CREATE_KSET: 5194 beiscsi_boot_create_kset(phba); 5195 /** 5196 * updated boot_kset is made visible to all before 5197 * ending the boot work. 5198 */ 5199 mb(); 5200 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5201 return; 5202 } 5203 if (!tag) { 5204 if (bs->retry--) 5205 schedule_work(&phba->boot_work); 5206 else 5207 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5208 } 5209 } 5210 5211 static void beiscsi_eqd_update_work(struct work_struct *work) 5212 { 5213 struct hwi_context_memory *phwi_context; 5214 struct be_set_eqd set_eqd[MAX_CPUS]; 5215 struct hwi_controller *phwi_ctrlr; 5216 struct be_eq_obj *pbe_eq; 5217 struct beiscsi_hba *phba; 5218 unsigned int pps, delta; 5219 struct be_aic_obj *aic; 5220 int eqd, i, num = 0; 5221 unsigned long now; 5222 5223 phba = container_of(work, struct beiscsi_hba, eqd_update.work); 5224 if (!beiscsi_hba_is_online(phba)) 5225 return; 5226 5227 phwi_ctrlr = phba->phwi_ctrlr; 5228 phwi_context = phwi_ctrlr->phwi_ctxt; 5229 5230 for (i = 0; i <= phba->num_cpus; i++) { 5231 aic = &phba->aic_obj[i]; 5232 pbe_eq = &phwi_context->be_eq[i]; 5233 now = jiffies; 5234 if (!aic->jiffies || time_before(now, aic->jiffies) || 5235 pbe_eq->cq_count < aic->eq_prev) { 5236 aic->jiffies = now; 5237 aic->eq_prev = pbe_eq->cq_count; 5238 continue; 5239 } 5240 delta = jiffies_to_msecs(now - aic->jiffies); 5241 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5242 eqd = (pps / 1500) << 2; 5243 5244 if (eqd < 8) 5245 eqd = 0; 5246 eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX); 5247 eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN); 5248 5249 aic->jiffies = now; 5250 aic->eq_prev = pbe_eq->cq_count; 5251 5252 if (eqd != aic->prev_eqd) { 5253 set_eqd[num].delay_multiplier = (eqd * 65)/100; 5254 set_eqd[num].eq_id = pbe_eq->q.id; 5255 aic->prev_eqd = eqd; 5256 num++; 5257 } 5258 } 5259 if (num) 5260 /* completion of this is ignored */ 5261 beiscsi_modify_eq_delay(phba, set_eqd, num); 5262 5263 schedule_delayed_work(&phba->eqd_update, 5264 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5265 } 5266 5267 static void beiscsi_hw_tpe_check(struct timer_list *t) 5268 { 5269 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5270 u32 wait; 5271 5272 /* if not TPE, do nothing */ 5273 if (!beiscsi_detect_tpe(phba)) 
5274 return; 5275 5276 /* wait default 4000ms before recovering */ 5277 wait = 4000; 5278 if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL) 5279 wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL; 5280 queue_delayed_work(phba->wq, &phba->recover_port, 5281 msecs_to_jiffies(wait)); 5282 } 5283 5284 static void beiscsi_hw_health_check(struct timer_list *t) 5285 { 5286 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5287 5288 5289 if (beiscsi_detect_ue(phba)) { 5290 __beiscsi_log(phba, KERN_ERR, 5291 "BM_%d : port in error: %lx\n", phba->state); 5292 /* sessions are no longer valid, so first fail the sessions */ 5293 queue_work(phba->wq, &phba->sess_work); 5294 5295 /* detect UER supported */ 5296 if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state)) 5297 return; 5298 /* modify this timer to check TPE */ 5299 phba->hw_check.function = beiscsi_hw_tpe_check; 5300 } 5301 5302 mod_timer(&phba->hw_check, 5303 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5304 } 5305 5306 /* 5307 * beiscsi_enable_port()- Enables the disabled port. 5308 * Only port resources freed in disable function are reallocated. 5309 * This is called in HBA error handling path. 5310 * 5311 * @phba: Instance of driver private structure 5312 * 5313 **/ 5314 static int beiscsi_enable_port(struct beiscsi_hba *phba) 5315 { 5316 struct hwi_context_memory *phwi_context; 5317 struct hwi_controller *phwi_ctrlr; 5318 struct be_eq_obj *pbe_eq; 5319 int ret, i; 5320 5321 if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 5322 __beiscsi_log(phba, KERN_ERR, 5323 "BM_%d : %s : port is online %lx\n", 5324 __func__, phba->state); 5325 return 0; 5326 } 5327 5328 ret = beiscsi_init_sliport(phba); 5329 if (ret) 5330 return ret; 5331 5332 be2iscsi_enable_msix(phba); 5333 5334 beiscsi_get_params(phba); 5335 beiscsi_set_host_data(phba); 5336 /* Re-enable UER. If different TPE occurs then it is recoverable. */ 5337 beiscsi_set_uer_feature(phba); 5338 5339 phba->shost->max_id = phba->params.cxns_per_ctrl; 5340 phba->shost->can_queue = phba->params.ios_per_ctrl; 5341 ret = beiscsi_init_port(phba); 5342 if (ret < 0) { 5343 __beiscsi_log(phba, KERN_ERR, 5344 "BM_%d : init port failed\n"); 5345 goto disable_msix; 5346 } 5347 5348 for (i = 0; i < MAX_MCC_CMD; i++) { 5349 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5350 phba->ctrl.mcc_tag[i] = i + 1; 5351 phba->ctrl.mcc_tag_status[i + 1] = 0; 5352 phba->ctrl.mcc_tag_available++; 5353 } 5354 5355 phwi_ctrlr = phba->phwi_ctrlr; 5356 phwi_context = phwi_ctrlr->phwi_ctxt; 5357 for (i = 0; i < phba->num_cpus; i++) { 5358 pbe_eq = &phwi_context->be_eq[i]; 5359 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); 5360 } 5361 5362 i = (phba->pcidev->msix_enabled) ? i : 0; 5363 /* Work item for MCC handling */ 5364 pbe_eq = &phwi_context->be_eq[i]; 5365 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); 5366 5367 ret = beiscsi_init_irqs(phba); 5368 if (ret < 0) { 5369 __beiscsi_log(phba, KERN_ERR, 5370 "BM_%d : setup IRQs failed %d\n", ret); 5371 goto cleanup_port; 5372 } 5373 hwi_enable_intr(phba); 5374 /* port operational: clear all error bits */ 5375 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5376 __beiscsi_log(phba, KERN_INFO, 5377 "BM_%d : port online: 0x%lx\n", phba->state); 5378 5379 /* start hw_check timer and eqd_update work */ 5380 schedule_delayed_work(&phba->eqd_update, 5381 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5382 5383 /** 5384 * Timer function gets modified for TPE detection. 5385 * Always reinit to do health check first.
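* beiscsi_hw_health_check() switches it to beiscsi_hw_tpe_check() when a UE is detected.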
5386 */ 5387 phba->hw_check.function = beiscsi_hw_health_check; 5388 mod_timer(&phba->hw_check, 5389 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5390 return 0; 5391 5392 cleanup_port: 5393 for (i = 0; i < phba->num_cpus; i++) { 5394 pbe_eq = &phwi_context->be_eq[i]; 5395 irq_poll_disable(&pbe_eq->iopoll); 5396 } 5397 hwi_cleanup_port(phba); 5398 5399 disable_msix: 5400 pci_free_irq_vectors(phba->pcidev); 5401 return ret; 5402 } 5403 5404 /* 5405 * beiscsi_disable_port()- Disable port and cleanup driver resources. 5406 * This is called in HBA error handling and driver removal. 5407 * @phba: Instance Priv structure 5408 * @unload: indicate driver is unloading 5409 * 5410 * Free the OS and HW resources held by the driver 5411 **/ 5412 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload) 5413 { 5414 struct hwi_context_memory *phwi_context; 5415 struct hwi_controller *phwi_ctrlr; 5416 struct be_eq_obj *pbe_eq; 5417 unsigned int i; 5418 5419 if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state)) 5420 return; 5421 5422 phwi_ctrlr = phba->phwi_ctrlr; 5423 phwi_context = phwi_ctrlr->phwi_ctxt; 5424 hwi_disable_intr(phba); 5425 beiscsi_free_irqs(phba); 5426 pci_free_irq_vectors(phba->pcidev); 5427 5428 for (i = 0; i < phba->num_cpus; i++) { 5429 pbe_eq = &phwi_context->be_eq[i]; 5430 irq_poll_disable(&pbe_eq->iopoll); 5431 } 5432 cancel_delayed_work_sync(&phba->eqd_update); 5433 cancel_work_sync(&phba->boot_work); 5434 /* WQ might be running; cancel queued mcc_work if we are not exiting */ 5435 if (!unload && beiscsi_hba_in_error(phba)) { 5436 pbe_eq = &phwi_context->be_eq[i]; 5437 cancel_work_sync(&pbe_eq->mcc_work); 5438 } 5439 hwi_cleanup_port(phba); 5440 beiscsi_cleanup_port(phba); 5441 } 5442 5443 static void beiscsi_sess_work(struct work_struct *work) 5444 { 5445 struct beiscsi_hba *phba; 5446 5447 phba = container_of(work, struct beiscsi_hba, sess_work); 5448 /* 5449 * This work gets scheduled only in case of HBA error. 5450 * Old sessions are gone and need to be re-established. 5451 * iscsi_session_failure needs process context hence this work. 5452 */ 5453 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5454 } 5455 5456 static void beiscsi_recover_port(struct work_struct *work) 5457 { 5458 struct beiscsi_hba *phba; 5459 5460 phba = container_of(work, struct beiscsi_hba, recover_port.work); 5461 beiscsi_disable_port(phba, 0); 5462 beiscsi_enable_port(phba); 5463 } 5464 5465 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 5466 pci_channel_state_t state) 5467 { 5468 struct beiscsi_hba *phba = NULL; 5469 5470 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5471 set_bit(BEISCSI_HBA_PCI_ERR, &phba->state); 5472 5473 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5474 "BM_%d : EEH error detected\n"); 5475 5476 /* first stop UE detection when PCI error detected */ 5477 del_timer_sync(&phba->hw_check); 5478 cancel_delayed_work_sync(&phba->recover_port); 5479 5480 /* sessions are no longer valid, so first fail the sessions */ 5481 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5482 beiscsi_disable_port(phba, 0); 5483 5484 if (state == pci_channel_io_perm_failure) { 5485 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5486 "BM_%d : EEH : State PERM Failure\n"); 5487 return PCI_ERS_RESULT_DISCONNECT; 5488 } 5489 5490 pci_disable_device(pdev); 5491 5492 /* The error could cause the FW to trigger a flash debug dump.
* Resetting the card while flash dump is in progress 5494 * can cause it not to recover; wait for it to finish. 5495 * Wait only for first function as it is needed only once per 5496 * adapter. 5497 **/ 5498 if (pdev->devfn == 0) 5499 ssleep(30); 5500 5501 return PCI_ERS_RESULT_NEED_RESET; 5502 } 5503 5504 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) 5505 { 5506 struct beiscsi_hba *phba = NULL; 5507 int status = 0; 5508 5509 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5510 5511 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5512 "BM_%d : EEH Reset\n"); 5513 5514 status = pci_enable_device(pdev); 5515 if (status) 5516 return PCI_ERS_RESULT_DISCONNECT; 5517 5518 pci_set_master(pdev); 5519 pci_set_power_state(pdev, PCI_D0); 5520 pci_restore_state(pdev); 5521 5522 status = beiscsi_check_fw_rdy(phba); 5523 if (status) { 5524 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5525 "BM_%d : EEH Reset Completed\n"); 5526 } else { 5527 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5528 "BM_%d : EEH Reset Completion Failure\n"); 5529 return PCI_ERS_RESULT_DISCONNECT; 5530 } 5531 5532 pci_cleanup_aer_uncorrect_error_status(pdev); 5533 return PCI_ERS_RESULT_RECOVERED; 5534 } 5535 5536 static void beiscsi_eeh_resume(struct pci_dev *pdev) 5537 { 5538 struct beiscsi_hba *phba; 5539 int ret; 5540 5541 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5542 pci_save_state(pdev); 5543 5544 ret = beiscsi_enable_port(phba); 5545 if (ret) 5546 __beiscsi_log(phba, KERN_ERR, 5547 "BM_%d : AER EEH resume failed\n"); 5548 } 5549 5550 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5551 const struct pci_device_id *id) 5552 { 5553 struct hwi_context_memory *phwi_context; 5554 struct hwi_controller *phwi_ctrlr; 5555 struct beiscsi_hba *phba = NULL; 5556 struct be_eq_obj *pbe_eq; 5557 unsigned int s_handle; 5558 char wq_name[20]; 5559 int ret, i; 5560 5561 ret = beiscsi_enable_pci(pcidev); 5562 if (ret < 0) { 5563 dev_err(&pcidev->dev, 5564 "beiscsi_dev_probe - Failed to enable pci device\n"); 5565 return ret; 5566 } 5567 5568 phba = beiscsi_hba_alloc(pcidev); 5569 if (!phba) { 5570 dev_err(&pcidev->dev, 5571 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5572 ret = -ENOMEM; 5573 goto disable_pci; 5574 } 5575 5576 /* Enable EEH reporting */ 5577 ret = pci_enable_pcie_error_reporting(pcidev); 5578 if (ret) 5579 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5580 "BM_%d : PCIe Error Reporting " 5581 "Enabling Failed\n"); 5582 5583 pci_save_state(pcidev); 5584 5585 /* Initialize Driver configuration Parameters */ 5586 beiscsi_hba_attrs_init(phba); 5587 5588 phba->mac_addr_set = false; 5589 5590 switch (pcidev->device) { 5591 case BE_DEVICE_ID1: 5592 case OC_DEVICE_ID1: 5593 case OC_DEVICE_ID2: 5594 phba->generation = BE_GEN2; 5595 phba->iotask_fn = beiscsi_iotask; 5596 dev_warn(&pcidev->dev, 5597 "Obsolete/Unsupported BE2 Adapter Family\n"); 5598 break; 5599 case BE_DEVICE_ID2: 5600 case OC_DEVICE_ID3: 5601 phba->generation = BE_GEN3; 5602 phba->iotask_fn = beiscsi_iotask; 5603 break; 5604 case OC_SKH_ID1: 5605 phba->generation = BE_GEN4; 5606 phba->iotask_fn = beiscsi_iotask_v2; 5607 break; 5608 default: 5609 phba->generation = 0; 5610 } 5611 5612 ret = be_ctrl_init(phba, pcidev); 5613 if (ret) { 5614 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5615 "BM_%d : be_ctrl_init failed\n"); 5616 goto free_hba; 5617 } 5618 5619 ret = beiscsi_init_sliport(phba); 5620 if (ret) 5621 goto free_hba; 5622 5623 spin_lock_init(&phba->io_sgl_lock); 5624
static int beiscsi_dev_probe(struct pci_dev *pcidev,
			     const struct pci_device_id *id)
{
	struct hwi_context_memory *phwi_context;
	struct hwi_controller *phwi_ctrlr;
	struct beiscsi_hba *phba = NULL;
	struct be_eq_obj *pbe_eq;
	unsigned int s_handle;
	char wq_name[20];
	int ret, i;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
		ret = -ENOMEM;
		goto disable_pci;
	}

	/* Enable EEH reporting */
	ret = pci_enable_pcie_error_reporting(pcidev);
	if (ret)
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : PCIe Error Reporting Enabling Failed\n");

	pci_save_state(pcidev);

	/* Initialize driver configuration parameters */
	beiscsi_hba_attrs_init(phba);

	phba->mac_addr_set = false;

	switch (pcidev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
	case OC_DEVICE_ID2:
		phba->generation = BE_GEN2;
		phba->iotask_fn = beiscsi_iotask;
		dev_warn(&pcidev->dev,
			 "Obsolete/Unsupported BE2 Adapter Family\n");
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID3:
		phba->generation = BE_GEN3;
		phba->iotask_fn = beiscsi_iotask;
		break;
	case OC_SKH_ID1:
		phba->generation = BE_GEN4;
		phba->iotask_fn = beiscsi_iotask_v2;
		break;
	default:
		phba->generation = 0;
	}

	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_ctrl_init failed\n");
		goto free_hba;
	}

	ret = beiscsi_init_sliport(phba);
	if (ret)
		goto free_hba;

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->async_pdu_lock);
	ret = beiscsi_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Error getting fw config\n");
		goto free_port;
	}
	beiscsi_get_port_name(&phba->ctrl, phba);
	beiscsi_get_params(phba);
	beiscsi_set_host_data(phba);
	beiscsi_set_uer_feature(phba);

	be2iscsi_enable_msix(phba);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : num_cpus = %d\n",
		    phba->num_cpus);

	phba->shost->max_id = phba->params.cxns_per_ctrl;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_get_memory(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : alloc host mem failed\n");
		goto free_port;
	}

	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : init port failed\n");
		beiscsi_free_mem(phba);
		goto free_port;
	}

	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
		       sizeof(struct be_dma_mem));
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
	if (!phba->wq) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe - Failed to allocate work queue\n");
		ret = -ENOMEM;
		goto free_twq;
	}

	INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
	}

	i = (phba->pcidev->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe - Failed in beiscsi_init_irqs\n");
		goto disable_iopoll;
	}
	hwi_enable_intr(phba);

	ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
	if (ret)
		goto free_irqs;

	/* set online bit after port is operational */
	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
	__beiscsi_log(phba, KERN_INFO,
		      "BM_%d : port online: 0x%lx\n", phba->state);

	INIT_WORK(&phba->boot_work, beiscsi_boot_work);
	ret = beiscsi_boot_get_shandle(phba, &s_handle);
	if (ret > 0) {
		beiscsi_start_boot_work(phba, s_handle);
		/*
		 * Set this bit after starting the work to let
		 * probe handle it first.
		 * An ASYNC event can also schedule this work.
		 */
		set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
	}

	beiscsi_iface_create_default(phba);
	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));

	INIT_WORK(&phba->sess_work, beiscsi_sess_work);
	INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
	/*
	 * Start UE detection here. A UE before this point would stall
	 * the probe and eventually fail it.
	 */
	timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_irqs:
	hwi_disable_intr(phba);
	beiscsi_free_irqs(phba);
disable_iopoll:
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	destroy_workqueue(phba->wq);
free_twq:
	hwi_cleanup_port(phba);
	beiscsi_cleanup_port(phba);
	beiscsi_free_mem(phba);
free_port:
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
free_hba:
	pci_disable_msix(phba->pcidev);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_set_drvdata(pcidev, NULL);
disable_pci:
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
	return ret;
}
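/*
 * beiscsi_remove()- PCI remove routine, the inverse of probe.
 * @pcidev: PCI device being removed
 *
 * Stops UE detection and the recovery/session work, removes the SCSI
 * host, disables the port, and releases the workqueue, host-memory,
 * mailbox-DMA and PCI resources acquired in beiscsi_dev_probe().
 */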
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	/* first stop UE detection before unloading */
	del_timer_sync(&phba->hw_check);
	cancel_delayed_work_sync(&phba->recover_port);
	cancel_work_sync(&phba->sess_work);

	beiscsi_iface_destroy_default(phba);
	iscsi_host_remove(phba->shost);
	beiscsi_disable_port(phba, 1);

	/* after cancelling boot_work */
	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);

	/* free all resources */
	destroy_workqueue(phba->wq);
	beiscsi_free_mem(phba);

	/* ctrl uninit */
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);

	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_disable_pcie_error_reporting(pcidev);
	pci_set_drvdata(pcidev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
}

/* EEH (PCI error recovery) entry points */
static struct pci_error_handlers beiscsi_eeh_handlers = {
	.error_detected = beiscsi_eeh_err_detected,
	.slot_reset = beiscsi_eeh_reset,
	.resume = beiscsi_eeh_resume,
};

struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = beiscsi_attr_is_visible,
	.set_iface_param = beiscsi_iface_set_param,
	.get_iface_param = beiscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};
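/*
 * PCI driver glue: ties probe/remove to the PCI core and registers
 * the EEH callbacks above via .err_handler.
 */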
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table,
	.err_handler = &beiscsi_eeh_handlers
};

static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
		iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi transport.\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
	       &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);