/*
 * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
 * Host Bus Adapters. Refer to the README file included with this package
 * for driver version and adapter compatibility.
 *
 * Copyright (c) 2018 Broadcom. All Rights Reserved.
 * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful. ALL EXPRESS
 * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
 * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
 * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
 * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 *
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
#include <linux/irq_poll.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");
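
/*
 * The macros below generate, for each tunable "_name", the functions
 * beiscsi_<name>_disp/_change/_store/_init. BEISCSI_RW_ATTR stitches
 * them together with a module parameter and a read-write sysfs device
 * attribute: e.g. BEISCSI_RW_ATTR(log_enable, ...) creates the module
 * param beiscsi_log_enable and dev_attr_beiscsi_log_enable.
 */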
#define beiscsi_disp_param(_name)\
static ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
static int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
			size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
static int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
static DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
		   beiscsi_##_name##_disp, beiscsi_##_name##_store)

/*
 * When new log level added update MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events : 0x01\n"
		"\t\t\t\tMailbox Events : 0x02\n"
		"\t\t\t\tMiscellaneous Events : 0x04\n"
		"\t\t\t\tError Handling : 0x08\n"
		"\t\t\t\tIO Path Events : 0x10\n"
		"\t\t\t\tConfiguration Path : 0x20\n"
		"\t\t\t\tiSCSI Protocol : 0x40\n");

static DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
static DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO,
		   beiscsi_adap_family_disp, NULL);
static DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
static DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
static DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
		   beiscsi_active_session_disp, NULL);
static DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
		   beiscsi_free_session_disp, NULL);
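
/*
 * Both the generated read-write tunables and the read-only info
 * attributes above are exported through one attribute group, wired
 * into the SCSI host template via shost_groups.
 */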
static struct attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable.attr,
	&dev_attr_beiscsi_drvr_ver.attr,
	&dev_attr_beiscsi_adapter_family.attr,
	&dev_attr_beiscsi_fw_ver.attr,
	&dev_attr_beiscsi_active_session_count.attr,
	&dev_attr_beiscsi_free_session_count.attr,
	&dev_attr_beiscsi_phys_port.attr,
	NULL,
};

ATTRIBUTE_GROUPS(beiscsi);

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task = iscsi_cmd(sc)->task;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_io_task *abrt_io_task;
	struct beiscsi_conn *beiscsi_conn;
	struct iscsi_session *session;
	struct invldt_cmd_tbl inv_tbl;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	int rc;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

completion_check:
	/* check if we raced, task just got cleaned up under us */
	spin_lock_bh(&session->back_lock);
	if (!abrt_task || !abrt_task->sc) {
		spin_unlock_bh(&session->back_lock);
		return SUCCESS;
	}
	/* get a task ref till FW processes the req for the ICD used */
	if (!iscsi_get_task(abrt_task)) {
		spin_unlock_bh(&session->back_lock);
		/* We are just about to call iscsi_free_task so wait for it. */
		udelay(5);
		goto completion_check;
	}

	abrt_io_task = abrt_task->dd_data;
	conn = abrt_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	/* mark WRBs invalid that have not been processed by FW yet */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	}
	inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
	spin_unlock_bh(&session->back_lock);

	rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
	iscsi_put_task(abrt_task);
	if (rc) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : sc %p invalidation failed %d\n",
			    sc, rc);
		return FAILED;
	}

	return iscsi_eh_abort(sc);
}

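/*
 * Like beiscsi_eh_abort(), device reset first invalidates in FW the
 * ICDs of all commands outstanding on the LUN, then lets libiscsi
 * issue the actual TMF via iscsi_eh_device_reset().
 */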
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct beiscsi_invldt_cmd_tbl {
		struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
		struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
	} *inv_tbl;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_session *session;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	struct iscsi_task *task;
	unsigned int i, nents;
	int rc, more = 0;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
	if (!inv_tbl) {
		spin_unlock_bh(&session->frwd_lock);
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : invldt_cmd_tbl alloc failed\n");
		return FAILED;
	}
	nents = 0;
	/* take back_lock to prevent task from getting cleaned up under us */
	spin_lock(&session->back_lock);
	for (i = 0; i < conn->session->cmds_max; i++) {
		task = conn->session->cmds[i];
		if (!task->sc)
			continue;

		if (sc->device->lun != task->sc->device->lun)
			continue;
		/*
		 * Can't fit in more cmds? Normally this won't happen because
		 * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ.
		 */
		if (nents == BE_INVLDT_CMD_TBL_SZ) {
			more = 1;
			break;
		}

		/* get a task ref till FW processes the req for the ICD used */
		if (!iscsi_get_task(task)) {
			/*
			 * The task has completed in the driver and is
			 * completing in libiscsi. Just ignore it here. When we
			 * call iscsi_eh_device_reset, it will wait for us.
			 */
			continue;
		}

		io_task = task->dd_data;
		/* mark WRBs invalid that have not been processed by FW yet */
		if (is_chip_be2_be3r(phba)) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
				      io_task->pwrb_handle->pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
				      io_task->pwrb_handle->pwrb, 1);
		}

		inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
		inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
		inv_tbl->task[nents] = task;
		nents++;
	}
	spin_unlock(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);

	rc = SUCCESS;
	if (!nents)
		goto end_reset;

	if (more) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : number of cmds exceeds size of invalidation table\n");
		rc = FAILED;
		goto end_reset;
	}

	if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : cid %u scmds invalidation failed\n",
			    beiscsi_conn->beiscsi_conn_cid);
		rc = FAILED;
	}

end_reset:
	for (i = 0; i < nents; i++)
		iscsi_put_task(inv_tbl->task[i]);
	kfree(inv_tbl);

	if (rc == SUCCESS)
		rc = iscsi_eh_device_reset(sc);
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);


static const struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.target_alloc = iscsi_target_alloc,
	.eh_timed_out = iscsi_eh_cmd_timed_out,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_groups = beiscsi_groups,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.max_segment_size = 65536,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct iscsi_cmd),
};

static struct scsi_transport_template *beiscsi_scsi_transport;

static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->max_id = BE2_MAX_SESSIONS - 1;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}

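/*
 * Three PCI regions are used: CSR registers (BAR 2), the doorbell
 * area (BAR 4) and the PCI config region (BAR 1 on BE_GEN2, else
 * BAR 0). beiscsi_map_pci_bars() maps them; this undoes the mapping.
 */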
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap(pci_resource_start(pcidev, 2),
		       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;

	addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
		       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);
	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}

static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
			mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);

	return status;
}

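/*
 * Worked example of the ICD alignment done below (illustrative
 * numbers, assuming 4K pages and BE2_SGE * sizeof(struct iscsi_sge)
 * == 512 bytes): icd_post_per_page is then 8, so an icd_start of 13
 * is rounded up to 16, icd_count is trimmed to a multiple of 8, and
 * the ICDs lost to alignment are subtracted from the usable count.
 */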
/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned ICD per page posting */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						   ~(align_mask));
				phba->fw_config.iscsi_icd_start[ulp_num] =
					icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICD discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
					icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Aligned ICD values\n"
				    "\t ICD Start : %d\n"
				    "\t ICD Count : %d\n"
				    "\t ICD Discarded : %d\n",
				    phba->fw_config.iscsi_icd_start[ulp_num],
				    phba->fw_config.iscsi_icd_count[ulp_num],
				    icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				    (total_cid_count +
				     BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}

static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		 DB_EQ_RING_ID_HIGH_MASK)
		<< DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

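/*
 * Interrupt model: with MSI-X enabled each IO EQ gets its own vector
 * serviced by be_isr_msix(), and one extra vector handles MCC events
 * in be_isr_mcc(); in legacy INTx mode be_isr() demultiplexes both
 * event types from EQ 0.
 */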
/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			mcc_events++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}

	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
	}
	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_queue_info *eq;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;

	phba = pbe_eq->phba;
	/* disable interrupt till iopoll completes */
	hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
	irq_poll_sched(&pbe_eq->iopoll);

	return IRQ_HANDLED;
}

/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events, io_events;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr, rearm;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	io_events = 0;
	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		      resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
			mcc_events++;
		else
			io_events++;
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (!io_events && !mcc_events)
		return IRQ_NONE;

	/* no need to rearm if interrupt is only for IOs */
	rearm = 0;
	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		/* rearm for MCCQ */
		rearm = 1;
	}
	if (io_events)
		irq_poll_sched(&pbe_eq->iopoll);
	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
	return IRQ_HANDLED;
}

static void beiscsi_free_irqs(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	int i;

	if (!phba->pcidev->msix_enabled) {
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
		return;
	}

	phwi_context = phba->phwi_ctrlr->phwi_ctxt;
	for (i = 0; i <= phba->num_cpus; i++) {
		free_irq(pci_irq_vector(phba->pcidev, i),
			 &phwi_context->be_eq[i]);
		kfree(phba->msi_name[i]);
	}
}

static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (pcidev->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kasprintf(GFP_KERNEL,
						      "beiscsi_%02x_%02x",
						      phba->shost->host_no, i);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			ret = request_irq(pci_irq_vector(pcidev, i),
					  be_isr_msix, 0, phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : %s-Failed to register msix for i = %d\n",
					    __func__, i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x",
					      phba->shost->host_no);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0,
				  phba->msi_name[i], &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : %s-Failed to register beiscsi_msix_mcc\n",
				    __func__);
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : %s-Failed to register irq\n",
				    __func__);
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		free_irq(pci_irq_vector(pcidev, j), &phwi_context->be_eq[j]);
		kfree(phba->msi_name[j]);
	}
	return ret;
}

void hwi_ring_cq_db(struct beiscsi_hba *phba,
		    unsigned int id, unsigned int num_processed,
		    unsigned char rearm)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		 DB_CQ_RING_ID_HIGH_MASK)
		<< DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

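/*
 * SGL handles come from two pre-allocated pools: io_sgl_hndl_base for
 * regular IO and eh_sgl_hndl_base for login/TMF tasks. Each pool is a
 * ring with independent alloc and free indices under its own lock.
 */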
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n",
			    phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}

static inline struct wrb_handle *
beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       unsigned int wrbs_per_cxn)
{
	struct wrb_handle *pwrb_handle;
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	if (!pwrb_context->wrb_handles_available) {
		spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
		return NULL;
	}
	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
	pwrb_context->wrb_handles_available--;
	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
		pwrb_context->alloc_index = 0;
	else
		pwrb_context->alloc_index++;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);

	if (pwrb_handle)
		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));

	return pwrb_handle;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pcontext: ptr to ptr to wrb context
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* return the context address */
	*pcontext = pwrb_context;
	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
}

static inline void
beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       struct wrb_handle *pwrb_handle,
		       unsigned int wrbs_per_cxn)
{
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;
	pwrb_handle->pio_handle = NULL;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	beiscsi_put_wrb_handle(pwrb_context,
			       pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle,"
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, "
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}

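/*
 * The be_complete_* helpers below translate a solicited CQE into the
 * matching iSCSI response (SCSI response, logout, TMF, NOP-In) and
 * complete the task through libiscsi.
 */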
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task,
	       struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task,
		   struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	uint16_t wrb_index, cid, cri_index;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_session *session;
	struct iscsi_task *task;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	session = beiscsi_conn->conn->session;
	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (task)
		__iscsi_put_task(task);
	spin_unlock_bh(&session->back_lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task,
		       struct common_sol_cqe *csol_cqe)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = csol_cqe->i_flags;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

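/*
 * adapter_get_sol_cqe() hides the BE2/BE3 vs SKH (v2) CQE layout
 * differences by copying the fields of interest into a common
 * struct common_sol_cqe for the completion path.
 */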
static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
				struct sol_cqe *psol,
				struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}


static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_task *task;
	uint16_t cri_index = 0;
	uint8_t type;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[
		      csol_cqe.wrb_index];

	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (!task) {
		spin_unlock_bh(&session->back_lock);
		return;
	}
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " %s- Solicited path\n", __func__);
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In %s, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", __func__, type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}

/*
 * ASYNC PDUs include
 * a. Unsolicited NOP-In (target initiated NOP-In)
 * b. ASYNC Messages
 * c. Reject PDU
 * d. Login response
 * These headers arrive unprocessed by the EP firmware.
 * iSCSI layer processes them.
 */
static unsigned int
beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct pdu_base *phdr, void *pdata, unsigned int dlen)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;
	struct iscsi_task *task;
	u8 code;

	code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
	switch (code) {
	case ISCSI_OP_NOOP_IN:
		pdata = NULL;
		dlen = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pdata);
		WARN_ON(!(dlen == 48));
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)phdr;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : unrecognized async PDU opcode 0x%x\n",
			    code);
		return 1;
	}
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
	return 0;
}

static inline void
beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	pasync_handle->is_final = 0;
	pasync_handle->buffer_len = 0;
	pasync_handle->in_use = 0;
	list_del_init(&pasync_handle->link);
}

static void
beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
			  struct hd_async_context *pasync_ctx,
			  u16 cri)
{
	struct hd_async_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link)
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
}

static struct hd_async_handle *
beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct i_t_dpdu_cqe *pdpdu_cqe,
		       u8 *header)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle;
	struct be_bus_address phys_addr;
	u16 cid, code, ci, cri;
	u8 final, error = 0;
	u32 dpl;

	cid = beiscsi_conn->beiscsi_conn_cid;
	cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
	/**
	 * This function is invoked to get the right async_handle structure
	 * from a given DEF PDU CQ entry.
	 *
	 * - index in CQ entry gives the vertical index
	 * - address in CQ entry is the offset where the DMA last ended
	 * - final - no more notifications for this PDU
	 */
	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      final, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				   index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      final, pdpdu_cqe);
	}

	/**
	 * DB addr Hi/Lo is same for BE and SKH.
	 * Subtract the dataplacementlength to get to the base.
	 */
	phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_lo, pdpdu_cqe);
	phys_addr.u.a32.address_lo -= dpl;
	phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_hi, pdpdu_cqe);

	code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
	switch (code) {
	case UNSOL_HDR_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].header;
		*header = 1;
		break;
	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
		error = 1;
		fallthrough;
	case UNSOL_DATA_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].data;
		break;
	/* called only for above codes */
	default:
		return NULL;
	}

	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
	    pasync_handle->index != ci) {
		/* driver bug - if ci does not match async handle index */
		error = 1;
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
			    cid, pasync_handle->is_header ? 'H' : 'D',
			    pasync_handle->pa.u.a64.address,
			    pasync_handle->index,
			    phys_addr.u.a64.address, ci);
		/* FW has stale address - attempt continuing by dropping */
	}

	/**
	 * DEF PDU header and data buffers with errors should be simply
	 * dropped as there are no consumers for it.
	 */
	if (error) {
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
		return NULL;
	}

	if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n",
			    cid, code, ci, phys_addr.u.a64.address);
		beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	}

	list_del_init(&pasync_handle->link);
	/**
	 * Each CID is associated with unique CRI.
	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally different.
	 **/
	pasync_handle->cri = cri;
	pasync_handle->is_final = final;
	pasync_handle->buffer_len = dpl;
	pasync_handle->in_use = 1;

	return pasync_handle;
}

static unsigned int
beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
		    struct hd_async_context *pasync_ctx,
		    u16 cri)
{
	struct iscsi_session *session = beiscsi_conn->conn->session;
	struct hd_async_handle *pasync_handle, *plast_handle;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	void *phdr = NULL, *pdata = NULL;
	u32 dlen = 0, status = 0;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	plast_handle = NULL;
	list_for_each_entry(pasync_handle, plist, link) {
		plast_handle = pasync_handle;
		/* get the header, the first entry */
		if (!phdr) {
			phdr = pasync_handle->pbuffer;
			continue;
		}
		/* use first buffer to collect all the data */
		if (!pdata) {
			pdata = pasync_handle->pbuffer;
			dlen = pasync_handle->buffer_len;
			continue;
		}
		if (!pasync_handle->buffer_len ||
		    (dlen + pasync_handle->buffer_len) >
		    pasync_ctx->async_data.buffer_size)
			break;
		memcpy(pdata + dlen, pasync_handle->pbuffer,
		       pasync_handle->buffer_len);
		dlen += pasync_handle->buffer_len;
	}

	if (!plast_handle->is_final) {
		/* last handle should have final PDU notification from FW */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n",
			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
			    AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr),
			    pasync_ctx->async_entry[cri].wq.hdr_len,
			    pasync_ctx->async_entry[cri].wq.bytes_needed,
			    pasync_ctx->async_entry[cri].wq.bytes_received);
	}
	spin_lock_bh(&session->back_lock);
	status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
	spin_unlock_bh(&session->back_lock);
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	return status;
}

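/*
 * Fragments of an async PDU are queued per CRI: the header handle
 * arrives first and sets bytes_needed, data handles accumulate until
 * bytes_received matches it, and the assembled PDU is then forwarded
 * via beiscsi_hdl_fwd_pdu().
 */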
static unsigned int
beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	unsigned int bytes_needed = 0, status = 0;
	u16 cri = pasync_handle->cri;
	struct cri_wait_queue *wq;
	struct beiscsi_hba *phba;
	struct pdu_base *ppdu;
	char *err = "";

	phba = beiscsi_conn->phba;
	wq = &pasync_ctx->async_entry[cri].wq;
	if (pasync_handle->is_header) {
		/* check if PDU hdr is rcv'd when old hdr not completed */
		if (wq->hdr_len) {
			err = "incomplete";
			goto drop_pdu;
		}
		ppdu = pasync_handle->pbuffer;
		bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
					     data_len_hi, ppdu);
		bytes_needed <<= 16;
		bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
							  data_len_lo, ppdu));
		wq->hdr_len = pasync_handle->buffer_len;
		wq->bytes_received = 0;
		wq->bytes_needed = bytes_needed;
		list_add_tail(&pasync_handle->link, &wq->list);
		if (!bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	} else {
		/* check if data received has header and is needed */
		if (!wq->hdr_len || !wq->bytes_needed) {
			err = "header less";
			goto drop_pdu;
		}
		wq->bytes_received += pasync_handle->buffer_len;
		/* Something got overwritten? Better catch it here. */
		if (wq->bytes_received > wq->bytes_needed) {
			err = "overflow";
			goto drop_pdu;
		}
		list_add_tail(&pasync_handle->link, &wq->list);
		if (wq->bytes_received == wq->bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	}
	return status;

drop_pdu:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
		    "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
		    beiscsi_conn->beiscsi_conn_cid, err,
		    pasync_handle->is_header ? 'H' : 'D',
		    wq->hdr_len, wq->bytes_needed,
		    pasync_handle->buffer_len);
	/* discard this handle */
	beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
	/* free all the other handles in cri_wait_queue */
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	/* try continuing */
	return status;
}

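/*
 * The default PDU header/data rings are replenished in batches: the
 * num_cons field of the CQE counts groups of 8 consumed RQEs, so
 * beiscsi_hdq_process_compl() reposts 8 * num_cons handles here.
 */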
static void
beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
			 u8 header, u8 ulp_num, u16 nbuf)
{
	struct hd_async_handle *pasync_handle;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	struct phys_addr *pasync_sge;
	u32 ring_id, doorbell = 0;
	u32 doorbell_offset;
	u16 prod, pi;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	if (header) {
		pasync_sge = pasync_ctx->async_header.ring_base;
		pi = pasync_ctx->async_header.pi;
		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
				  doorbell_offset;
	} else {
		pasync_sge = pasync_ctx->async_data.ring_base;
		pi = pasync_ctx->async_data.pi;
		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
				  doorbell_offset;
	}

	for (prod = 0; prod < nbuf; prod++) {
		if (header)
			pasync_handle = pasync_ctx->async_entry[pi].header;
		else
			pasync_handle = pasync_ctx->async_entry[pi].data;
		WARN_ON(pasync_handle->is_header != header);
		WARN_ON(pasync_handle->index != pi);
		/* setup the ring only once */
		if (nbuf == pasync_ctx->num_entries) {
			/* note hi is lo */
			pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi;
		}
		if (++pi == pasync_ctx->num_entries)
			pi = 0;
	}

	if (header)
		pasync_ctx->async_header.pi = pi;
	else
		pasync_ctx->async_data.pi = pi;

	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
	doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
	doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
	iowrite32(doorbell, phba->db_va + doorbell_offset);
}

static void
beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
			  struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle = NULL;
	struct hd_async_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	u8 ulp_num, consumed, header = 0;
	u16 cid_cri;

	phwi_ctrlr = phba->phwi_ctrlr;
	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
					       pdpdu_cqe, &header);
	if (is_chip_be2_be3r(phba))
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
					 num_cons, pdpdu_cqe);
	else
		consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
					 num_cons, pdpdu_cqe);
	if (pasync_handle)
		beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
	/* num_cons indicates number of 8 RQEs consumed */
	if (consumed)
		beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed);
}

void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return;

		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
					num_processed, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			beiscsi_process_async_event(phba, mcc_compl);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
		}

		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
}

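/*
 * MCC completions are handled in process context: the ISRs only count
 * MCC events and queue mcc_work, which drains the MCC CQ and rearms
 * the EQ once done.
 */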
static void beiscsi_mcc_work(struct work_struct *work)
{
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba;

	pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
	phba = pbe_eq->phba;
	beiscsi_process_mcc_cq(phba);
	/* rearm EQ for further interrupts */
	if (!beiscsi_hba_in_error(phba))
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}

/**
 * beiscsi_process_cq()- Process the Completion Queue
 * @pbe_eq: Event Q on which the Completion has come
 * @budget: Max number of events to be processed
 *
 * return
 *     Number of Completion Entries processed.
 **/
unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	unsigned int total = 0;
	unsigned int num_processed = 0;
	unsigned short code = 0, cid = 0;
	uint16_t cri_index = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		if (beiscsi_hba_in_error(phba))
			return 0;

		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] &
			CQE_CODE_MASK);

		/* Get the CID */
		if (is_chip_be2_be3r(phba)) {
			cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
		} else {
			if ((code == DRIVERMSG_NOTIFY) ||
			    (code == UNSOL_HDR_NOTIFY) ||
			    (code == UNSOL_DATA_NOTIFY))
				cid = AMAP_GET_BITS(
						struct amap_i_t_dpdu_cqe_v2,
						cid, sol);
			else
				cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    cid, sol);
		}

		cri_index = BE_GET_CRI_FROM_CID(cid);
		ep = phba->ep_array[cri_index];

		if (ep == NULL) {
			/* connection has already been freed
			 * just move on to next one
			 */
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT,
				    "BM_%d : proc cqe of disconn ep: cid %d\n",
				    cid);
			goto proc_next_cqe;
		}

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* replenish cq */
		if (num_processed == 32) {
			hwi_ring_cq_db(phba, cq->id, 32, 0);
			num_processed = 0;
		}
		total++;

		switch (code) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case UNSOL_DATA_NOTIFY:
			beiscsi_log(phba, KERN_INFO,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Received %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);

			spin_lock_bh(&phba->async_pdu_lock);
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Ignoring %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case CXN_KILLED_HDR_DIGEST_ERR:
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
				    cqe_desc[code], code, cid);
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
				    cqe_desc[code], code, cid);
			spin_lock_bh(&phba->async_pdu_lock);
			/* driver consumes the entry and drops the contents */
			beiscsi_hdq_process_compl(beiscsi_conn,
						  (struct i_t_dpdu_cqe *)sol);
			spin_unlock_bh(&phba->async_pdu_lock);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Event %s[%d] received on CID : %d\n",
				    cqe_desc[code], code, cid);
			if (beiscsi_conn)
				iscsi_conn_failure(beiscsi_conn->conn,
						   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Invalid CQE Event Received Code : %d CID 0x%x...\n",
				    code, cid);
			break;
		}

proc_next_cqe:
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
		if (total == budget)
			break;
	}

	hwi_ring_cq_db(phba, cq->id, num_processed, 1);
	return total;
}
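/*
 * How be_iopoll() below gets invoked, roughly; the init/sched calls live
 * elsewhere in this file, outside this excerpt, and the irq_poll API is
 * from <linux/irq_poll.h>:
 *
 *	irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
 *	...
 *	irq_poll_sched(&pbe_eq->iopoll);	// from the hard-IRQ handler
 *
 * be_iopoll() then runs in softirq context until beiscsi_process_cq()
 * consumes less than its budget, at which point it calls
 * irq_poll_complete() and re-arms the EQ.
 */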
static int be_iopoll(struct irq_poll *iop, int budget)
{
	unsigned int ret, io_events;
	struct beiscsi_hba *phba;
	struct be_eq_obj *pbe_eq;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;

	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
	phba = pbe_eq->phba;
	if (beiscsi_hba_in_error(phba)) {
		irq_poll_complete(iop);
		return 0;
	}

	io_events = 0;
	eq = &pbe_eq->q;
	eqe = queue_tail_node(eq);
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
	       EQE_VALID_MASK) {
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		io_events++;
	}
	hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);

	ret = beiscsi_process_cq(pbe_eq, budget);
	pbe_eq->cq_count += ret;
	if (ret < budget) {
		irq_poll_complete(iop);
		beiscsi_log(phba, KERN_INFO,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
			    pbe_eq->q.id, ret);
		if (!beiscsi_hba_in_error(phba))
			hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
	}
	return ret;
}

static void
hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
		 unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
			sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_addr_lo, pwrb,
				      lower_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_addr_hi, pwrb,
				      upper_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge0_len, pwrb,
				      sg_len);
			sge_len = sg_len;
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_addr_lo, pwrb,
				      lower_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_addr_hi, pwrb,
				      upper_32_bits(addr));
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
				      sge1_len, pwrb,
				      sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 0);
	}

	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      lower_32_bits(addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      upper_32_bits(addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
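/*
 * Layout written by hwi_write_sgl_v2() above: the first two data fragments
 * are inlined into the WRB itself (sge0/sge1, with sge1_r2t_offset carrying
 * the running byte offset), while the complete fragment list lives in the
 * off-WRB SGL at io_task->psgl_handle->pfrag. There, entry 0 always
 * describes the BHS, entry 1 is left zeroed by the memset, and the data
 * fragments start at entry 2, with last_sge set only on the final one.
 */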
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
			sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
				      sg_len);
			sge_len = sg_len;
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
				      pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
				      ((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
				      ((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
				      sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);

	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 0);
	}
	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      (addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
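/*
 * hwi_write_sgl() above is the BE2/BE3-R counterpart of hwi_write_sgl_v2():
 * the flow is identical, only the AMAP WRB field layout differs
 * (amap_iscsi_wrb vs amap_iscsi_wrb_v2) and the 64-bit address split is
 * open-coded with masks/shifts instead of lower_32_bits()/upper_32_bits().
 */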
/**
 * hwi_write_buffer()- Populate the WRB with task info
 * @pwrb: ptr to the WRB entry
 * @task: iscsi task which is to be executed
 **/
static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
	struct iscsi_sge *psgl;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct beiscsi_conn *beiscsi_conn = io_task->conn;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	uint8_t dsp_value = 0;

	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
		      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
		      io_task->bhs_pa.u.a32.address_hi);

	if (task->data) {

		/* Check for the data_count */
		dsp_value = (task->data_count) ? 1 : 0;

		if (is_chip_be2_be3r(phba))
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
				      pwrb, dsp_value);
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
				      pwrb, dsp_value);

		/* Map addr only if there is data_count */
		if (dsp_value) {
			io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
							     task->data,
							     task->data_count,
							     DMA_TO_DEVICE);
			if (dma_mapping_error(&phba->pcidev->dev,
					      io_task->mtask_addr))
				return -ENOMEM;
			io_task->mtask_data_count = task->data_count;
		} else
			io_task->mtask_addr = 0;

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
			      lower_32_bits(io_task->mtask_addr));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
			      upper_32_bits(io_task->mtask_addr));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
			      task->data_count);

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
		io_task->mtask_addr = 0;
	}

	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);
	if (task->data) {
		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);

		psgl++;
		/* task->data is non-NULL in this branch, so the mapped
		 * address can be written unconditionally
		 */
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      lower_32_bits(io_task->mtask_addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      upper_32_bits(io_task->mtask_addr));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
	}
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
	return 0;
}
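/*
 * The dma_map_single() in hwi_write_buffer() above is one half of a
 * map/unmap pair; the matching dma_unmap_single(..., DMA_TO_DEVICE) on
 * io_task->mtask_addr is expected to happen on task completion/cleanup,
 * outside this excerpt, using the saved io_task->mtask_data_count.
 */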
/**
 * beiscsi_find_mem_req()- Find mem needed
 * @phba: ptr to HBA struct
 **/
static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
{
	uint8_t mem_descr_index, ulp_num;
	unsigned int num_async_pdu_buf_pages;
	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;

	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);

	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
						 BE_ISCSI_PDU_HEADER_SIZE;
	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
					sizeof(struct hwi_context_memory);

	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
				     * (phba->params.wrbs_per_cxn)
				     * phba->params.cxns_per_ctrl;
	wrb_sz_per_cxn = sizeof(struct wrb_handle) *
			 (phba->params.wrbs_per_cxn);
	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
					phba->params.cxns_per_ctrl);

	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
				      phba->params.icds_per_ctrl;
	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			num_async_pdu_buf_sgl_pages =
				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
					       phba, ulp_num) *
					       sizeof(struct phys_addr));

			num_async_pdu_buf_pages =
				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
					       phba, ulp_num) *
					       phba->params.defpdu_hdr_sz);

			num_async_pdu_data_pages =
				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
					       phba, ulp_num) *
					       phba->params.defpdu_data_sz);

			num_async_pdu_data_sgl_pages =
				PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE(
					       phba, ulp_num) *
					       sizeof(struct phys_addr));

			mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
				BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				num_async_pdu_buf_pages * PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				num_async_pdu_data_pages * PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				num_async_pdu_buf_sgl_pages * PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				num_async_pdu_data_sgl_pages * PAGE_SIZE;

			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
				sizeof(struct hd_async_handle);

			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
				sizeof(struct hd_async_handle);

			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
					   (ulp_num * MEM_DESCR_OFFSET));
			phba->mem_req[mem_descr_index] =
				sizeof(struct hd_async_context) +
				(BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) *
				 sizeof(struct hd_async_entry));
		}
	}
}
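/*
 * Worked example for the sizing above, assuming PAGES_REQUIRED() rounds a
 * byte count up to whole PAGE_SIZE pages and using hypothetical values:
 * with, say, 2048 async HDQ entries, 4 KB pages and an 8-byte struct
 * phys_addr, the SGL ring needs PAGES_REQUIRED(2048 * 8) = 4 pages, while
 * the header buffers at a 128-byte defpdu_hdr_sz need
 * PAGES_REQUIRED(2048 * 128) = 64 pages.
 */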
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
{
	dma_addr_t bus_add;
	struct hwi_controller *phwi_ctrlr;
	struct be_mem_descriptor *mem_descr;
	struct mem_array *mem_arr, *mem_arr_orig;
	unsigned int i, j, alloc_size, curr_alloc_size;

	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
	if (!phba->phwi_ctrlr)
		return -ENOMEM;

	/* Allocate memory for wrb_context */
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl,
					  sizeof(struct hwi_wrb_context),
					  GFP_KERNEL);
	if (!phwi_ctrlr->wrb_context) {
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
				 GFP_KERNEL);
	if (!phba->init_mem) {
		kfree(phwi_ctrlr->wrb_context);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT,
				     sizeof(*mem_arr_orig),
				     GFP_KERNEL);
	if (!mem_arr_orig) {
		kfree(phba->init_mem);
		kfree(phwi_ctrlr->wrb_context);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		if (!phba->mem_req[i]) {
			mem_descr->mem_array = NULL;
			mem_descr++;
			continue;
		}

		j = 0;
		mem_arr = mem_arr_orig;
		alloc_size = phba->mem_req[i];
		memset(mem_arr, 0, sizeof(struct mem_array) *
		       BEISCSI_MAX_FRAGS_INIT);
		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
		do {
			mem_arr->virtual_address =
				dma_alloc_coherent(&phba->pcidev->dev,
					curr_alloc_size, &bus_add, GFP_KERNEL);
			if (!mem_arr->virtual_address) {
				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
					goto free_mem;
				if (curr_alloc_size -
					rounddown_pow_of_two(curr_alloc_size))
					curr_alloc_size = rounddown_pow_of_two
							  (curr_alloc_size);
				else
					curr_alloc_size = curr_alloc_size / 2;
			} else {
				mem_arr->bus_address.u.a64.address =
							(__u64) bus_add;
				mem_arr->size = curr_alloc_size;
				alloc_size -= curr_alloc_size;
				curr_alloc_size = min(be_max_phys_size *
						      1024, alloc_size);
				j++;
				mem_arr++;
			}
		} while (alloc_size);
		mem_descr->num_elements = j;
		mem_descr->size_in_bytes = phba->mem_req[i];
		mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr),
						     GFP_KERNEL);
		if (!mem_descr->mem_array)
			goto free_mem;

		memcpy(mem_descr->mem_array, mem_arr_orig,
		       sizeof(struct mem_array) * j);
		mem_descr++;
	}
	kfree(mem_arr_orig);
	return 0;
free_mem:
	mem_descr->num_elements = j;
	while ((i) || (j)) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			dma_free_coherent(&phba->pcidev->dev,
					  mem_descr->mem_array[j - 1].size,
					  mem_descr->mem_array[j - 1].
					  virtual_address,
					  (unsigned long)mem_descr->
					  mem_array[j - 1].
					  bus_address.u.a64.address);
		}
		if (i) {
			i--;
			kfree(mem_descr->mem_array);
			mem_descr--;
		}
	}
	kfree(mem_arr_orig);
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr->wrb_context);
	kfree(phba->phwi_ctrlr);
	return -ENOMEM;
}

static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	beiscsi_find_mem_req(phba);
	return beiscsi_alloc_mem(phba);
}
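/*
 * Example of the fallback strategy in beiscsi_alloc_mem() above: a 1 MB
 * requirement with be_max_phys_size = 64 would be satisfied as sixteen
 * 64 KB dma_alloc_coherent() fragments (the scratch array holds
 * BEISCSI_MAX_FRAGS_INIT entries). When a chunk allocation fails, the
 * chunk size is rounded down to a power of two, or halved if it already
 * is one, and the allocator gives up only below BE_MIN_MEM_SIZE.
 */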
static void iscsi_init_global_templates(struct beiscsi_hba *phba)
{
	struct pdu_data_out *pdata_out;
	struct pdu_nop_out *pnop_out;
	struct be_mem_descriptor *mem_descr;

	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
	pdata_out =
	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);

	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
		      IIOC_SCSI_DATA);

	pnop_out =
	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);

	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
}
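/*
 * The NOP-OUT template above sets TTT to 0xFFFFFFFF, the reserved target
 * transfer tag the iSCSI spec (RFC 7143) uses to mark a NOP-OUT that is
 * not a reply to a NOP-IN, with the F bit set and the I bit clear.
 */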
static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
	struct hwi_context_memory *phwi_ctxt;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int num_cxn_wrbh = 0;
	unsigned int num_cxn_wrb = 0, j, idx = 0, index;

	mem_descr_wrbh = phba->init_mem;
	mem_descr_wrbh += HWI_MEM_WRBH;

	mem_descr_wrb = phba->init_mem;
	mem_descr_wrb += HWI_MEM_WRB;
	phwi_ctrlr = phba->phwi_ctrlr;

	/* Allocate memory for WRBQ */
	phwi_ctxt = phwi_ctrlr->phwi_ctxt;
	phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl,
				     sizeof(struct be_queue_info),
				     GFP_KERNEL);
	if (!phwi_ctxt->be_wrbq) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : WRBQ Mem Alloc Failed\n");
		return -ENOMEM;
	}

	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		pwrb_context->pwrb_handle_base =
				kcalloc(phba->params.wrbs_per_cxn,
					sizeof(struct wrb_handle *),
					GFP_KERNEL);
		if (!pwrb_context->pwrb_handle_base) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : Mem Alloc Failed. Failing to load\n");
			goto init_wrb_hndl_failed;
		}
		pwrb_context->pwrb_handle_basestd =
				kcalloc(phba->params.wrbs_per_cxn,
					sizeof(struct wrb_handle *),
					GFP_KERNEL);
		if (!pwrb_context->pwrb_handle_basestd) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : Mem Alloc Failed. Failing to load\n");
			goto init_wrb_hndl_failed;
		}
		if (!num_cxn_wrbh) {
			pwrb_handle =
				mem_descr_wrbh->mem_array[idx].virtual_address;
			num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
					((sizeof(struct wrb_handle)) *
					 phba->params.wrbs_per_cxn));
			idx++;
		}
		pwrb_context->alloc_index = 0;
		pwrb_context->wrb_handles_available = 0;
		pwrb_context->free_index = 0;

		if (num_cxn_wrbh) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
								pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle->wrb_index = j;
				pwrb_handle++;
			}
			num_cxn_wrbh--;
		}
		spin_lock_init(&pwrb_context->wrb_lock);
	}
	idx = 0;
	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		if (!num_cxn_wrb) {
			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
				      ((sizeof(struct iscsi_wrb) *
					phba->params.wrbs_per_cxn));
			idx++;
		}

		if (num_cxn_wrb) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		}
	}
	return 0;
init_wrb_hndl_failed:
	/* free handle arrays for contexts 0..index; entries that were
	 * never allocated are NULL and kfree() tolerates NULL
	 */
	for (j = 0; j <= index; j++) {
		pwrb_context = &phwi_ctrlr->wrb_context[j];
		kfree(pwrb_context->pwrb_handle_base);
		kfree(pwrb_context->pwrb_handle_basestd);
	}
	kfree(phwi_ctxt->be_wrbq);
	return -ENOMEM;
}
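/*
 * beiscsi_init_wrb_handle() above leaves each connection with two parallel
 * views of the same handles: pwrb_handle_base feeds allocation via
 * alloc_index and pwrb_handle_basestd takes returned handles via
 * free_index, both protected by wrb_lock, as used by the WRB alloc/free
 * paths elsewhere in this file (outside this excerpt).
 */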
static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	uint8_t ulp_num;
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hd_async_context *pasync_ctx;
	struct hd_async_handle *pasync_header_h, *pasync_data_h;
	unsigned int index, idx, num_per_mem, num_async_data;
	struct be_mem_descriptor *mem_descr;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			/* get async_ctx for each ULP */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
				      (ulp_num * MEM_DESCR_OFFSET));

			phwi_ctrlr = phba->phwi_ctrlr;
			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
				(struct hd_async_context *)
				 mem_descr->mem_array[0].virtual_address;

			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
			memset(pasync_ctx, 0, sizeof(*pasync_ctx));

			pasync_ctx->async_entry =
					(struct hd_async_entry *)
					((long unsigned int)pasync_ctx +
					 sizeof(struct hd_async_context));

			pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba,
							ulp_num);
			/* setup header buffers */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.pi = 0;
			pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
			pasync_ctx->async_header.va_base =
				mem_descr->mem_array[0].virtual_address;

			pasync_ctx->async_header.pa_base.u.a64.address =
				mem_descr->mem_array[0].
				bus_address.u.a64.address;

			/* setup header buffer sgls */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.ring_base =
				mem_descr->mem_array[0].virtual_address;

			/* setup header buffer handles */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_header.handle_base =
				mem_descr->mem_array[0].virtual_address;

			/* setup data buffer sgls */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_data.ring_base =
				mem_descr->mem_array[0].virtual_address;
			/* setup data buffer handles */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (!mem_descr->mem_array[0].virtual_address)
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			pasync_ctx->async_data.handle_base =
				mem_descr->mem_array[0].virtual_address;

			pasync_header_h =
				(struct hd_async_handle *)
				pasync_ctx->async_header.handle_base;
			pasync_data_h =
				(struct hd_async_handle *)
				pasync_ctx->async_data.handle_base;

			/* setup data buffers */
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
					    virtual_address);
			} else
				beiscsi_log(phba, KERN_WARNING,
					    BEISCSI_LOG_INIT,
					    "BM_%d : No Virtual address for ULP : %d\n",
					    ulp_num);

			idx = 0;
			pasync_ctx->async_data.pi = 0;
			pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
			pasync_ctx->async_data.va_base =
				mem_descr->mem_array[idx].virtual_address;
			pasync_ctx->async_data.pa_base.u.a64.address =
				mem_descr->mem_array[idx].
				bus_address.u.a64.address;

			num_async_data = ((mem_descr->mem_array[idx].size) /
					  phba->params.defpdu_data_sz);
			num_per_mem = 0;

			for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE
					(phba, ulp_num); index++) {
				pasync_header_h->cri = -1;
				pasync_header_h->is_header = 1;
				pasync_header_h->index = index;
				INIT_LIST_HEAD(&pasync_header_h->link);
				pasync_header_h->pbuffer =
					(void *)((unsigned long)
					(pasync_ctx->async_header.va_base) +
					(p->defpdu_hdr_sz * index));

				pasync_header_h->pa.u.a64.address =
					pasync_ctx->async_header.pa_base.u.a64.
					address + (p->defpdu_hdr_sz * index);

				pasync_ctx->async_entry[index].header =
					pasync_header_h;
				pasync_header_h++;
				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
					       wq.list);
				pasync_data_h->cri = -1;
				pasync_data_h->is_header = 0;
				pasync_data_h->index = index;
				INIT_LIST_HEAD(&pasync_data_h->link);

				if (!num_async_data) {
					num_per_mem = 0;
					idx++;
					pasync_ctx->async_data.va_base =
						mem_descr->mem_array[idx].
						virtual_address;
					pasync_ctx->async_data.pa_base.u.
					a64.address =
						mem_descr->mem_array[idx].
						bus_address.u.a64.address;
					num_async_data =
						((mem_descr->mem_array[idx].
						  size) /
						 phba->params.defpdu_data_sz);
				}
				pasync_data_h->pbuffer =
					(void *)((unsigned long)
					(pasync_ctx->async_data.va_base) +
					(p->defpdu_data_sz * num_per_mem));

				pasync_data_h->pa.u.a64.address =
					pasync_ctx->async_data.pa_base.u.a64.
					address + (p->defpdu_data_sz *
					num_per_mem);
				num_per_mem++;
				num_async_data--;

				pasync_ctx->async_entry[index].data =
					pasync_data_h;
				pasync_data_h++;
			}
		}
	}

	return 0;
}

static int
be_sgl_create_contiguous(void *virtual_address,
			 u64 physical_address, u32 length,
			 struct be_dma_mem *sgl)
{
	WARN_ON(!virtual_address);
	WARN_ON(!physical_address);
	WARN_ON(!length);
	WARN_ON(!sgl);

	sgl->va = virtual_address;
	sgl->dma = (unsigned long)physical_address;
	sgl->size = length;

	return 0;
}

static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
{
	memset(sgl, 0, sizeof(*sgl));
}

static void
hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
		     struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous(pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}

static void
hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
			   struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}

static int be_fill_queue(struct be_queue_info *q,
			 u16 len, u16 entry_size, void *vaddress)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = vaddress;
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}
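/*
 * Vector/EQ layout assumed by beiscsi_create_eqs() below: one EQ per CPU
 * for I/O completions, plus one extra EQ for the MCC when MSI-X is
 * enabled. With num_cpus = 4 and MSI-X on, for example, that is 5 EQs,
 * each sized num_eq_entries * sizeof(struct be_eq_entry) rounded up to
 * whole pages.
 */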
static int beiscsi_create_eqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	int ret = -ENOMEM, eq_for_mcc;
	unsigned int i, num_eq_pages;
	struct be_queue_info *eq;
	struct be_dma_mem *mem;
	void *eq_vaddress;
	dma_addr_t paddr;

	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
				      sizeof(struct be_eq_entry));

	if (phba->pcidev->msix_enabled)
		eq_for_mcc = 1;
	else
		eq_for_mcc = 0;
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		phwi_context->be_eq[i].phba = phba;
		eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
						 num_eq_pages * PAGE_SIZE,
						 &paddr, GFP_KERNEL);
		if (!eq_vaddress) {
			ret = -ENOMEM;
			goto create_eq_error;
		}

		mem->va = eq_vaddress;
		ret = be_fill_queue(eq, phba->params.num_eq_entries,
				    sizeof(struct be_eq_entry), eq_vaddress);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : be_fill_queue Failed for EQ\n");
			goto create_eq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
					    BEISCSI_EQ_DELAY_DEF);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_cmd_eq_create Failed for EQ\n");
			goto create_eq_error;
		}

		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : eqid = %d\n",
			    phwi_context->be_eq[i].q.id);
	}
	return 0;

create_eq_error:
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		if (mem->va)
			dma_free_coherent(&phba->pcidev->dev,
					  num_eq_pages * PAGE_SIZE,
					  mem->va, mem->dma);
	}
	return ret;
}

static int beiscsi_create_cqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	unsigned int i, num_cq_pages;
	struct be_queue_info *cq, *eq;
	struct be_dma_mem *mem;
	struct be_eq_obj *pbe_eq;
	void *cq_vaddress;
	int ret = -ENOMEM;
	dma_addr_t paddr;

	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
				      sizeof(struct sol_cqe));

	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		eq = &phwi_context->be_eq[i].q;
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq->cq = cq;
		pbe_eq->phba = phba;
		mem = &cq->dma_mem;
		cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
						 num_cq_pages * PAGE_SIZE,
						 &paddr, GFP_KERNEL);
		if (!cq_vaddress) {
			ret = -ENOMEM;
			goto create_cq_error;
		}

		ret = be_fill_queue(cq, phba->params.num_cq_entries,
				    sizeof(struct sol_cqe), cq_vaddress);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : be_fill_queue Failed for ISCSI CQ\n");
			goto create_cq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
					    false, 0);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_cmd_cq_create Failed for ISCSI CQ\n");
			goto create_cq_error;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : iscsi cq_id is %d for eq_id %d\n"
			    "iSCSI CQ CREATED\n", cq->id, eq->id);
	}
	return 0;

create_cq_error:
	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		mem = &cq->dma_mem;
		if (mem->va)
			dma_free_coherent(&phba->pcidev->dev,
					  num_cq_pages * PAGE_SIZE,
					  mem->va, mem->dma);
	}
	return ret;
}
static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
		       struct hwi_context_memory *phwi_context,
		       struct hwi_controller *phwi_ctrlr,
		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dq = &phwi_context->be_def_hdrq[ulp_num];
	cq = &phwi_context->be_cq[0];
	mem = &dq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
		     (ulp_num * MEM_DESCR_OFFSET);
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
			    ulp_num);

		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
		   bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_hdr_sz,
					      BEISCSI_DEFQ_HDR, ulp_num);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
			    ulp_num);

		return ret;
	}

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
		    ulp_num,
		    phwi_context->be_def_hdrq[ulp_num].id);
	return 0;
}

static int
beiscsi_create_def_data(struct beiscsi_hba *phba,
			struct hwi_context_memory *phwi_context,
			struct hwi_controller *phwi_ctrlr,
			unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dataq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dataq = &phwi_context->be_def_dataq[ulp_num];
	cq = &phwi_context->be_cq[0];
	mem = &dataq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
		     (ulp_num * MEM_DESCR_OFFSET);
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_fill_queue Failed for DEF PDU "
			    "DATA on ULP : %d\n",
			    ulp_num);

		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
		   bus_address.u.a64.address;
	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
					      def_pdu_ring_sz,
					      phba->params.defpdu_data_sz,
					      BEISCSI_DEFQ_DATA, ulp_num);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d be_cmd_create_default_pdu_queue"
			    " Failed for DEF PDU DATA on ULP : %d\n",
			    ulp_num);
		return ret;
	}

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : iscsi def data id on ULP : %d is %d\n",
		    ulp_num,
		    phwi_context->be_def_dataq[ulp_num].id);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : DEFAULT PDU DATA RING CREATED on ULP : %d\n",
		    ulp_num);
	return 0;
}
static int
beiscsi_post_template_hdr(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	struct mem_array *pm_arr;
	struct be_dma_mem sgl;
	int status, ulp_num;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
				     (ulp_num * MEM_DESCR_OFFSET);
			pm_arr = mem_descr->mem_array;

			hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
			status = be_cmd_iscsi_post_template_hdr(
				 &phba->ctrl, &sgl);

			if (status != 0) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : Post Template HDR Failed for "
					    "ULP_%d\n", ulp_num);
				return status;
			}

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : Template HDR Pages Posted for "
				    "ULP_%d\n", ulp_num);
		}
	}
	return 0;
}

static int
beiscsi_post_pages(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	struct mem_array *pm_arr;
	unsigned int page_offset, i;
	struct be_dma_mem sgl;
	int status, ulp_num = 0;

	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_SGE;
	pm_arr = mem_descr->mem_array;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;

	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
			phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
	for (i = 0; i < mem_descr->num_elements; i++) {
		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
						     page_offset,
						     (pm_arr->size / PAGE_SIZE));
		page_offset += pm_arr->size / PAGE_SIZE;
		if (status != 0) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : post sgl failed.\n");
			return status;
		}
		pm_arr++;
	}
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : POSTED PAGES\n");
	return 0;
}

static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&phba->pcidev->dev, mem->size,
				  mem->va, mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}
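/*
 * beiscsi_create_wrb_rings() below carves the flat HWI_MEM_WRB allocation
 * into one ring per connection, each wrbs_per_cxn * sizeof(struct
 * iscsi_wrb) bytes. When a memory fragment runs out of whole rings it
 * advances to the next mem_array element rather than splitting a ring
 * across fragments.
 */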
static int
beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
			 struct hwi_context_memory *phwi_context,
			 struct hwi_controller *phwi_ctrlr)
{
	unsigned int num_wrb_rings;
	u64 pa_addr_lo;
	unsigned int idx, num, i, ulp_num;
	struct mem_array *pwrb_arr;
	void *wrb_vaddr;
	struct be_dma_mem sgl;
	struct be_mem_descriptor *mem_descr;
	struct hwi_wrb_context *pwrb_context;
	int status;
	uint8_t ulp_count = 0, ulp_base_num = 0;
	uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };

	idx = 0;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_WRB;
	pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl,
				 sizeof(*pwrb_arr),
				 GFP_KERNEL);
	if (!pwrb_arr) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Memory alloc failed in create wrb ring.\n");
		return -ENOMEM;
	}
	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
	num_wrb_rings = mem_descr->mem_array[idx].size /
			(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));

	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
		if (num_wrb_rings) {
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
					     sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		} else {
			idx++;
			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
			pa_addr_lo = mem_descr->mem_array[idx].
				     bus_address.u.a64.address;
			num_wrb_rings = mem_descr->mem_array[idx].size /
					(phba->params.wrbs_per_cxn *
					 sizeof(struct iscsi_wrb));
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
					     sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		}
	}

	/* Get the ULP Count */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			ulp_count++;
			ulp_base_num = ulp_num;
			cid_count_ulp[ulp_num] =
				BEISCSI_GET_CID_COUNT(phba, ulp_num);
		}

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		if (ulp_count > 1) {
			ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;

			if (!cid_count_ulp[ulp_base_num])
				ulp_base_num = (ulp_base_num + 1) %
					       BEISCSI_ULP_COUNT;

			cid_count_ulp[ulp_base_num]--;
		}

		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
					    &phwi_context->be_wrbq[i],
					    &phwi_ctrlr->wrb_context[i],
					    ulp_base_num);
		if (status != 0) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : wrbq create failed.\n");
			kfree(pwrb_arr);
			return status;
		}
		pwrb_context = &phwi_ctrlr->wrb_context[i];
		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
	}
	kfree(pwrb_arr);
	return 0;
}

static void free_wrb_handles(struct beiscsi_hba *phba)
{
	unsigned int index;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;

	phwi_ctrlr = phba->phwi_ctrlr;
	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		kfree(pwrb_context->pwrb_handle_base);
		kfree(pwrb_context->pwrb_handle_basestd);
	}
}
static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *ptag_mem;
	struct be_queue_info *q;
	int i, tag;

	q = &phba->ctrl.mcc_obj.q;
	for (i = 0; i < MAX_MCC_CMD; i++) {
		tag = i + 1;
		if (!test_bit(MCC_TAG_STATE_RUNNING,
			      &ctrl->ptag_state[tag].tag_state))
			continue;

		if (test_bit(MCC_TAG_STATE_TIMEOUT,
			     &ctrl->ptag_state[tag].tag_state)) {
			ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
			if (ptag_mem->size) {
				dma_free_coherent(&ctrl->pdev->dev,
						  ptag_mem->size,
						  ptag_mem->va,
						  ptag_mem->dma);
				ptag_mem->size = 0;
			}
			continue;
		}
		/**
		 * If MCC is still active and waiting then wake up the process.
		 * We are here only because port is going offline. The process
		 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is
		 * returned for the operation and allocated memory cleaned up.
		 */
		if (waitqueue_active(&ctrl->mcc_wait[tag])) {
			ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
			ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
			wake_up_interruptible(&ctrl->mcc_wait[tag]);
			/*
			 * Control tag info gets reinitialized in enable
			 * so wait for the process to clear running state.
			 */
			while (test_bit(MCC_TAG_STATE_RUNNING,
					&ctrl->ptag_state[tag].tag_state))
				schedule_timeout_uninterruptible(HZ);
		}
		/**
		 * For MCC with tag_states MCC_TAG_STATE_ASYNC and
		 * MCC_TAG_STATE_IGNORE nothing needs to be done.
		 */
	}
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
		be_queue_free(phba, q);
	}

	q = &phba->ctrl.mcc_obj.cq;
	if (q->created) {
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		be_queue_free(phba, q);
	}
}

static int be_mcc_queues_create(struct beiscsi_hba *phba,
				struct hwi_context_memory *phwi_context)
{
	struct be_queue_info *q, *cq;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* Alloc MCC compl queue */
	cq = &phba->ctrl.mcc_obj.cq;
	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;
	/* Ask BE to create MCC compl queue */
	if (phba->pcidev->msix_enabled) {
		if (beiscsi_cmd_cq_create(ctrl, cq,
					  &phwi_context->be_eq[phba->num_cpus].q,
					  false, true, 0))
			goto mcc_cq_free;
	} else {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
					  false, true, 0))
			goto mcc_cq_free;
	}

	/* Alloc MCC queue */
	q = &phba->ctrl.mcc_obj.q;
	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (beiscsi_cmd_mccq_create(phba, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(phba, q);
mcc_cq_destroy:
	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(phba, cq);
err:
	return -ENOMEM;
}

static void be2iscsi_enable_msix(struct beiscsi_hba *phba)
{
	int nvec = 1;

	switch (phba->generation) {
	case BE_GEN2:
	case BE_GEN3:
		nvec = BEISCSI_MAX_NUM_CPUS + 1;
		break;
	case BE_GEN4:
		nvec = phba->fw_config.eqid_count;
		break;
	default:
		nvec = 2;
		break;
	}

	/* if eqid_count == 1 fall back to INTX */
	if (enable_msix && nvec > 1) {
		struct irq_affinity desc = { .post_vectors = 1 };

		/* use MSI-X only if the full vector range was granted */
		if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec,
				PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) >= 0) {
			phba->num_cpus = nvec - 1;
			return;
		}
	}

	phba->num_cpus = 1;
}
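/*
 * With the irq_affinity descriptor above (post_vectors = 1), the last of
 * the nvec MSI-X vectors is excluded from affinity spreading and is set
 * aside for the MCC EQ (be_mcc_queues_create() binds the MCC CQ to
 * be_eq[num_cpus]), while the remaining nvec - 1 vectors are spread across
 * CPUs for I/O EQs; that is why num_cpus is nvec - 1 on success.
 */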
static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	if (beiscsi_hba_in_error(phba))
		return;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->pcidev->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
		       & EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
	}
}

static void hwi_cleanup_port(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i, eq_for_mcc, ulp_num;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			beiscsi_cmd_iscsi_cleanup(phba, ulp_num);

	/**
	 * Purge all EQ entries that may have been left out. This is to
	 * workaround a problem we've seen occasionally where driver gets an
	 * interrupt with EQ entry bit set after stopping the controller.
	 */
	hwi_purge_eq(phba);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	be_cmd_iscsi_remove_template_hdr(ctrl);

	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}
	kfree(phwi_context->be_wrbq);
	free_wrb_handles(phba);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			q = &phwi_context->be_def_hdrq[ulp_num];
			if (q->created)
				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

			q = &phwi_context->be_def_dataq[ulp_num];
			if (q->created)
				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
		}
	}

	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	for (i = 0; i < (phba->num_cpus); i++) {
		q = &phwi_context->be_cq[i];
		if (q->created) {
			be_queue_free(phba, q);
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
		}
	}

	be_mcc_queues_destroy(phba);
	if (phba->pcidev->msix_enabled)
		eq_for_mcc = 1;
	else
		eq_for_mcc = 0;
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		q = &phwi_context->be_eq[i].q;
		if (q->created) {
			be_queue_free(phba, q);
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
		}
	}
	/* this ensures complete FW cleanup */
	beiscsi_cmd_function_reset(phba);
	/* last communication, indicate driver is unloading */
	beiscsi_cmd_special_wrb(&phba->ctrl, 0);
}
phwi_context, 3748 phwi_ctrlr, 3749 def_pdu_ring_sz, 3750 ulp_num); 3751 if (status != 0) { 3752 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3753 "BM_%d : Default Header not created for ULP : %d\n", 3754 ulp_num); 3755 goto error; 3756 } 3757 3758 status = beiscsi_create_def_data(phba, phwi_context, 3759 phwi_ctrlr, 3760 def_pdu_ring_sz, 3761 ulp_num); 3762 if (status != 0) { 3763 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3764 "BM_%d : Default Data not created for ULP : %d\n", 3765 ulp_num); 3766 goto error; 3767 } 3768 /** 3769 * Now that the default PDU rings have been created, 3770 * let EP know about it. 3771 */ 3772 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, 3773 ulp_num, nbufs); 3774 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, 3775 ulp_num, nbufs); 3776 } 3777 } 3778 3779 status = beiscsi_post_pages(phba); 3780 if (status != 0) { 3781 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3782 "BM_%d : Post SGL Pages Failed\n"); 3783 goto error; 3784 } 3785 3786 status = beiscsi_post_template_hdr(phba); 3787 if (status != 0) { 3788 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3789 "BM_%d : Template HDR Posting for CXN Failed\n"); 3790 } 3791 3792 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3793 if (status != 0) { 3794 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3795 "BM_%d : WRB Rings not created\n"); 3796 goto error; 3797 } 3798 3799 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3800 uint16_t async_arr_idx = 0; 3801 3802 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3803 uint16_t cri = 0; 3804 struct hd_async_context *pasync_ctx; 3805 3806 pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3807 phwi_ctrlr, ulp_num); 3808 for (cri = 0; cri < 3809 phba->params.cxns_per_ctrl; cri++) { 3810 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI 3811 (phwi_ctrlr, cri)) 3812 pasync_ctx->cid_to_async_cri_map[ 3813 phwi_ctrlr->wrb_context[cri].cid] = 3814 async_arr_idx++; 3815 } 3816 } 3817 } 3818 3819 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3820 "BM_%d : hwi_init_port success\n"); 3821 return 0; 3822 3823 error: 3824 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3825 "BM_%d : hwi_init_port failed"); 3826 hwi_cleanup_port(phba); 3827 return status; 3828 } 3829 3830 static int hwi_init_controller(struct beiscsi_hba *phba) 3831 { 3832 struct hwi_controller *phwi_ctrlr; 3833 3834 phwi_ctrlr = phba->phwi_ctrlr; 3835 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3836 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3837 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3838 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3839 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3840 phwi_ctrlr->phwi_ctxt); 3841 } else { 3842 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3843 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3844 "than one element.Failing to load\n"); 3845 return -ENOMEM; 3846 } 3847 3848 iscsi_init_global_templates(phba); 3849 if (beiscsi_init_wrb_handle(phba)) 3850 return -ENOMEM; 3851 3852 if (hwi_init_async_pdu_ctx(phba)) { 3853 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3854 "BM_%d : hwi_init_async_pdu_ctx failed\n"); 3855 return -ENOMEM; 3856 } 3857 3858 if (hwi_init_port(phba) != 0) { 3859 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3860 "BM_%d : hwi_init_controller failed\n"); 3861 3862 return -ENOMEM; 3863 } 3864 return 0; 3865 } 3866 3867 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3868 { 3869 struct be_mem_descriptor *mem_descr; 3870 int i, j; 3871 3872 mem_descr = phba->init_mem; 3873 for (i = 0; i < 
SE_MEM_MAX; i++) { 3874 for (j = mem_descr->num_elements; j > 0; j--) { 3875 dma_free_coherent(&phba->pcidev->dev, 3876 mem_descr->mem_array[j - 1].size, 3877 mem_descr->mem_array[j - 1].virtual_address, 3878 (unsigned long)mem_descr->mem_array[j - 1]. 3879 bus_address.u.a64.address); 3880 } 3881 3882 kfree(mem_descr->mem_array); 3883 mem_descr++; 3884 } 3885 kfree(phba->init_mem); 3886 kfree(phba->phwi_ctrlr->wrb_context); 3887 kfree(phba->phwi_ctrlr); 3888 } 3889 3890 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3891 { 3892 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3893 struct sgl_handle *psgl_handle; 3894 struct iscsi_sge *pfrag; 3895 unsigned int arr_index, i, idx; 3896 unsigned int ulp_icd_start, ulp_num = 0; 3897 3898 phba->io_sgl_hndl_avbl = 0; 3899 phba->eh_sgl_hndl_avbl = 0; 3900 3901 mem_descr_sglh = phba->init_mem; 3902 mem_descr_sglh += HWI_MEM_SGLH; 3903 if (1 == mem_descr_sglh->num_elements) { 3904 phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl, 3905 sizeof(struct sgl_handle *), 3906 GFP_KERNEL); 3907 if (!phba->io_sgl_hndl_base) { 3908 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3909 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3910 return -ENOMEM; 3911 } 3912 phba->eh_sgl_hndl_base = 3913 kcalloc(phba->params.icds_per_ctrl - 3914 phba->params.ios_per_ctrl, 3915 sizeof(struct sgl_handle *), GFP_KERNEL); 3916 if (!phba->eh_sgl_hndl_base) { 3917 kfree(phba->io_sgl_hndl_base); 3918 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3919 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3920 return -ENOMEM; 3921 } 3922 } else { 3923 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3924 "BM_%d : HWI_MEM_SGLH is more than one element." 3925 "Failing to load\n"); 3926 return -ENOMEM; 3927 } 3928 3929 arr_index = 0; 3930 idx = 0; 3931 while (idx < mem_descr_sglh->num_elements) { 3932 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3933 3934 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3935 sizeof(struct sgl_handle)); i++) { 3936 if (arr_index < phba->params.ios_per_ctrl) { 3937 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3938 phba->io_sgl_hndl_avbl++; 3939 arr_index++; 3940 } else { 3941 phba->eh_sgl_hndl_base[arr_index - 3942 phba->params.ios_per_ctrl] = 3943 psgl_handle; 3944 arr_index++; 3945 phba->eh_sgl_hndl_avbl++; 3946 } 3947 psgl_handle++; 3948 } 3949 idx++; 3950 } 3951 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3952 "BM_%d : phba->io_sgl_hndl_avbl=%d " 3953 "phba->eh_sgl_hndl_avbl=%d\n", 3954 phba->io_sgl_hndl_avbl, 3955 phba->eh_sgl_hndl_avbl); 3956 3957 mem_descr_sg = phba->init_mem; 3958 mem_descr_sg += HWI_MEM_SGE; 3959 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3960 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 3961 mem_descr_sg->num_elements); 3962 3963 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3964 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3965 break; 3966 3967 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 3968 3969 arr_index = 0; 3970 idx = 0; 3971 while (idx < mem_descr_sg->num_elements) { 3972 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 3973 3974 for (i = 0; 3975 i < (mem_descr_sg->mem_array[idx].size) / 3976 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 3977 i++) { 3978 if (arr_index < phba->params.ios_per_ctrl) 3979 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 3980 else 3981 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 3982 phba->params.ios_per_ctrl]; 3983 psgl_handle->pfrag = pfrag; 3984 AMAP_SET_BITS(struct 
amap_iscsi_sge, addr_hi, pfrag, 0); 3985 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3986 pfrag += phba->params.num_sge_per_io; 3987 psgl_handle->sgl_index = ulp_icd_start + arr_index++; 3988 } 3989 idx++; 3990 } 3991 phba->io_sgl_free_index = 0; 3992 phba->io_sgl_alloc_index = 0; 3993 phba->eh_sgl_free_index = 0; 3994 phba->eh_sgl_alloc_index = 0; 3995 return 0; 3996 } 3997 3998 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 3999 { 4000 int ret; 4001 uint16_t i, ulp_num; 4002 struct ulp_cid_info *ptr_cid_info = NULL; 4003 4004 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4005 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4006 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), 4007 GFP_KERNEL); 4008 4009 if (!ptr_cid_info) { 4010 ret = -ENOMEM; 4011 goto free_memory; 4012 } 4013 4014 /* Allocate memory for CID array */ 4015 ptr_cid_info->cid_array = 4016 kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num), 4017 sizeof(*ptr_cid_info->cid_array), 4018 GFP_KERNEL); 4019 if (!ptr_cid_info->cid_array) { 4020 kfree(ptr_cid_info); 4021 ptr_cid_info = NULL; 4022 ret = -ENOMEM; 4023 4024 goto free_memory; 4025 } 4026 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( 4027 phba, ulp_num); 4028 4029 /* Save the cid_info_array ptr */ 4030 phba->cid_array_info[ulp_num] = ptr_cid_info; 4031 } 4032 } 4033 phba->ep_array = kcalloc(phba->params.cxns_per_ctrl, 4034 sizeof(struct iscsi_endpoint *), 4035 GFP_KERNEL); 4036 if (!phba->ep_array) { 4037 ret = -ENOMEM; 4038 4039 goto free_memory; 4040 } 4041 4042 phba->conn_table = kcalloc(phba->params.cxns_per_ctrl, 4043 sizeof(struct beiscsi_conn *), 4044 GFP_KERNEL); 4045 if (!phba->conn_table) { 4046 kfree(phba->ep_array); 4047 phba->ep_array = NULL; 4048 ret = -ENOMEM; 4049 4050 goto free_memory; 4051 } 4052 4053 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4054 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4055 4056 ptr_cid_info = phba->cid_array_info[ulp_num]; 4057 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 4058 phba->phwi_ctrlr->wrb_context[i].cid; 4059 4060 } 4061 4062 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4063 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4064 ptr_cid_info = phba->cid_array_info[ulp_num]; 4065 4066 ptr_cid_info->cid_alloc = 0; 4067 ptr_cid_info->cid_free = 0; 4068 } 4069 } 4070 return 0; 4071 4072 free_memory: 4073 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4074 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4075 ptr_cid_info = phba->cid_array_info[ulp_num]; 4076 4077 if (ptr_cid_info) { 4078 kfree(ptr_cid_info->cid_array); 4079 kfree(ptr_cid_info); 4080 phba->cid_array_info[ulp_num] = NULL; 4081 } 4082 } 4083 } 4084 4085 return ret; 4086 } 4087 4088 static void hwi_enable_intr(struct beiscsi_hba *phba) 4089 { 4090 struct be_ctrl_info *ctrl = &phba->ctrl; 4091 struct hwi_controller *phwi_ctrlr; 4092 struct hwi_context_memory *phwi_context; 4093 struct be_queue_info *eq; 4094 u8 __iomem *addr; 4095 u32 reg, i; 4096 u32 enabled; 4097 4098 phwi_ctrlr = phba->phwi_ctrlr; 4099 phwi_context = phwi_ctrlr->phwi_ctxt; 4100 4101 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 4102 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 4103 reg = ioread32(addr); 4104 4105 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4106 if (!enabled) { 4107 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4108 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4109 "BM_%d : reg =x%08x addr=%p\n", reg, addr); 4110 iowrite32(reg, addr); 
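		/*
		 * Host interrupts were masked; the read-modify-write above
		 * sets the hostintr bit to unmask them before the EQ
		 * doorbells below re-arm the event queues.
		 */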
	}

	if (!phba->pcidev->msix_enabled) {
		eq = &phwi_context->be_eq[0].q;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : eq->id=%d\n", eq->id);

		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	} else {
		for (i = 0; i <= phba->num_cpus; i++) {
			eq = &phwi_context->be_eq[i].q;
			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BM_%d : eq->id=%d\n", eq->id);
			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
		}
	}
}

static void hwi_disable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;

	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);

	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (enabled) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		iowrite32(reg, addr);
	} else
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : In hwi_disable_intr, Already Disabled\n");
}

static int beiscsi_init_port(struct beiscsi_hba *phba)
{
	int ret;

	ret = hwi_init_controller(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : init controller failed\n");
		return ret;
	}
	ret = beiscsi_init_sgl_handle(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : init sgl handles failed\n");
		goto cleanup_port;
	}

	ret = hba_setup_cid_tbls(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : setup CID table failed\n");
		kfree(phba->io_sgl_hndl_base);
		kfree(phba->eh_sgl_hndl_base);
		goto cleanup_port;
	}
	return ret;

cleanup_port:
	hwi_cleanup_port(phba);
	return ret;
}

static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
{
	struct ulp_cid_info *ptr_cid_info = NULL;
	int ulp_num;

	kfree(phba->io_sgl_hndl_base);
	kfree(phba->eh_sgl_hndl_base);
	kfree(phba->ep_array);
	kfree(phba->conn_table);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
			ptr_cid_info = phba->cid_array_info[ulp_num];

			if (ptr_cid_info) {
				kfree(ptr_cid_info->cid_array);
				kfree(ptr_cid_info);
				phba->cid_array_info[ulp_num] = NULL;
			}
		}
	}
}

/**
 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
 * @beiscsi_conn: ptr to the conn to be cleaned up
 * @task: ptr to iscsi_task resource to be freed.
 *
 * Free driver mgmt resources bound to CXN.
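 * This releases the task's WRB handle and mgmt SGL handle, and unmaps
 * any DMA mapping that was set up for the mgmt task data.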
 **/
void
beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
			       struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(
				beiscsi_conn->beiscsi_conn_cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	io_task = task->dd_data;

	if (io_task->pwrb_handle) {
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
		io_task->pwrb_handle = NULL;
	}

	if (io_task->psgl_handle) {
		free_mgmt_sgl_handle(phba, io_task->psgl_handle);
		io_task->psgl_handle = NULL;
	}

	if (io_task->mtask_addr) {
		dma_unmap_single(&phba->pcidev->dev,
				 io_task->mtask_addr,
				 io_task->mtask_data_count,
				 DMA_TO_DEVICE);
		io_task->mtask_addr = 0;
	}
}

/**
 * beiscsi_cleanup_task()- Free driver resources of the task
 * @task: ptr to the iscsi task
 *
 **/
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	if (io_task->cmd_bhs) {
		dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
		io_task->cmd_bhs = NULL;
		task->hdr = NULL;
	}

	if (task->sc) {
		if (io_task->pwrb_handle) {
			free_wrb_handle(phba, pwrb_context,
					io_task->pwrb_handle);
			io_task->pwrb_handle = NULL;
		}

		if (io_task->psgl_handle) {
			free_io_sgl_handle(phba, io_task->psgl_handle);
			io_task->psgl_handle = NULL;
		}

		if (io_task->scsi_cmnd) {
			if (io_task->num_sg)
				scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}
	} else {
		if (!beiscsi_conn->login_in_progress)
			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
	}
}

void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct hwi_wrb_context *pwrb_context = NULL;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_task *task = beiscsi_conn->task;
	struct iscsi_session *session = task->conn->session;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
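	 * The login task's driver handles are recycled here:
	 * beiscsi_cleanup_task() below frees them, and alloc_wrb_handle()
	 * then hands out a fresh WRB for posting the CONTEXT_UPDATE.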
	 */
	beiscsi_conn->login_in_progress = 0;
	spin_lock_bh(&session->back_lock);
	beiscsi_cleanup_task(task);
	spin_unlock_bh(&session->back_lock);

	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
				       &pwrb_context);

	/* Check for the adapter family */
	if (is_chip_be2_be3r(phba))
		beiscsi_offload_cxn_v0(params, pwrb_handle,
				       phba->init_mem,
				       pwrb_context);
	else
		beiscsi_offload_cxn_v2(params, pwrb_handle,
				       pwrb_context);

	be_dws_le_to_cpu(pwrb_handle->pwrb,
			 sizeof(struct iscsi_target_context_update_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
		     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);

	/*
	 * There is no completion for CONTEXT_UPDATE. The completion of next
	 * WRB posted guarantees FW's processing and DMA'ing of it.
	 * Use beiscsi_put_wrb_handle to put it back in the pool which makes
	 * sure zero'ing or reuse of the WRB only after wrbs_per_cxn.
	 */
	beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
			      int *index, int *age)
{
	*index = (int)itt;
	if (age)
		*age = conn->session->age;
}

/**
 * beiscsi_alloc_pdu - allocates pdu and related resources
 * @task: libiscsi task
 * @opcode: opcode of pdu for task
 *
 * This is called with the session lock held. It will allocate
 * the wrb and sgl if needed for the command. And it will prep
 * the pdu's itt. beiscsi_parse_pdu will later translate
 * the pdu itt to the libiscsi task itt.
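 *
 * The driver itt packs the WRB index in the upper 16 bits and the SGL
 * index in the lower 16 bits; see the cpu_to_be32() encoding at the
 * end of this function.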
 */
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	itt_t itt;
	uint16_t cri_index = 0;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	dma_addr_t paddr;

	io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_ATOMIC, &paddr);
	if (!io_task->cmd_bhs)
		return -ENOMEM;
	io_task->bhs_pa.u.a64.address = paddr;
	io_task->libiscsi_itt = (itt_t)task->itt;
	io_task->conn = beiscsi_conn;

	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
	task->hdr_max = sizeof(struct be_cmd_bhs);
	io_task->psgl_handle = NULL;
	io_task->pwrb_handle = NULL;

	if (task->sc) {
		io_task->psgl_handle = alloc_io_sgl_handle(phba);
		if (!io_task->psgl_handle) {
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Alloc of IO_SGL_ICD Failed "
				    "for the CID : %d\n",
				    beiscsi_conn->beiscsi_conn_cid);
			goto free_hndls;
		}
		io_task->pwrb_handle = alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid,
					&io_task->pwrb_context);
		if (!io_task->pwrb_handle) {
			beiscsi_log(phba, KERN_ERR,
				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
				    "BM_%d : Alloc of WRB_HANDLE Failed "
				    "for the CID : %d\n",
				    beiscsi_conn->beiscsi_conn_cid);
			goto free_io_hndls;
		}
	} else {
		io_task->scsi_cmnd = NULL;
		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
			beiscsi_conn->task = task;
			if (!beiscsi_conn->login_in_progress) {
				io_task->psgl_handle = (struct sgl_handle *)
						alloc_mgmt_sgl_handle(phba);
				if (!io_task->psgl_handle) {
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_IO |
						    BEISCSI_LOG_CONFIG,
						    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
						    "for the CID : %d\n",
						    beiscsi_conn->beiscsi_conn_cid);
					goto free_hndls;
				}

				beiscsi_conn->login_in_progress = 1;
				beiscsi_conn->plogin_sgl_handle =
							io_task->psgl_handle;
				io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid,
					&io_task->pwrb_context);
				if (!io_task->pwrb_handle) {
					beiscsi_log(phba, KERN_ERR,
						    BEISCSI_LOG_IO |
						    BEISCSI_LOG_CONFIG,
						    "BM_%d : Alloc of WRB_HANDLE Failed "
						    "for the CID : %d\n",
						    beiscsi_conn->beiscsi_conn_cid);
					goto free_mgmt_hndls;
				}
				beiscsi_conn->plogin_wrb_handle =
							io_task->pwrb_handle;

			} else {
				io_task->psgl_handle =
						beiscsi_conn->plogin_sgl_handle;
				io_task->pwrb_handle =
						beiscsi_conn->plogin_wrb_handle;
			}
		} else {
			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
			if (!io_task->psgl_handle) {
				beiscsi_log(phba, KERN_ERR,
					    BEISCSI_LOG_IO |
					    BEISCSI_LOG_CONFIG,
					    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
					    "for the CID : %d\n",
					    beiscsi_conn->beiscsi_conn_cid);
				goto free_hndls;
			}
			io_task->pwrb_handle =
					alloc_wrb_handle(phba,
					beiscsi_conn->beiscsi_conn_cid,
					&io_task->pwrb_context);
			if (!io_task->pwrb_handle) {
				beiscsi_log(phba, KERN_ERR,
					    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
					    "BM_%d : Alloc of WRB_HANDLE Failed "
					    "for the CID : %d\n",
					    beiscsi_conn->beiscsi_conn_cid);
				goto free_mgmt_hndls;
			}

		}
	}
	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
				  wrb_index << 16) | (unsigned int)
				  (io_task->psgl_handle->sgl_index));
	io_task->pwrb_handle->pio_handle = task;

	io_task->cmd_bhs->iscsi_hdr.itt = itt;
	return 0;

free_io_hndls:
	free_io_sgl_handle(phba, io_task->psgl_handle);
	goto free_hndls;
free_mgmt_hndls:
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	io_task->psgl_handle = NULL;
free_hndls:
	phwi_ctrlr = phba->phwi_ctrlr;
	cri_index = BE_GET_CRI_FROM_CID(
			beiscsi_conn->beiscsi_conn_cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	if (io_task->pwrb_handle)
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
	io_task->pwrb_handle = NULL;
	dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
		      io_task->bhs_pa.u.a64.address);
	io_task->cmd_bhs = NULL;
	return -ENOMEM;
}

static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
			     unsigned int num_sg, unsigned int xferlen,
			     unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;

	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
	}

	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
					  type, pwrb);

	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
				  &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->wrb_index);
	if (io_task->pwrb_context->plast_wrb)
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
			      io_task->pwrb_context->plast_wrb,
			      io_task->pwrb_handle->wrb_index);
	io_task->pwrb_context->plast_wrb = pwrb;

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) <<
		     DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);
	return 0;
}

static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}

	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
					  type, pwrb);

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)
				  &io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->wrb_index);
	if (io_task->pwrb_context->plast_wrb)
		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
			      io_task->pwrb_context->plast_wrb,
			      io_task->pwrb_handle->wrb_index);
	io_task->pwrb_context->plast_wrb = pwrb;

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);
	return 0;
}

static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;
	unsigned int pwrb_typeoffset = 0;
	int ret = 0;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;

	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
			      be32_to_cpu(task->cmdsn));
		AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
			      io_task->pwrb_handle->wrb_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
			      io_task->psgl_handle->sgl_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
			      task->data_count);
		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
			      io_task->pwrb_handle->wrb_index);
		if (io_task->pwrb_context->plast_wrb)
			AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
				      io_task->pwrb_context->plast_wrb,
				      io_task->pwrb_handle->wrb_index);
		io_task->pwrb_context->plast_wrb = pwrb;

		pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
			      be32_to_cpu(task->cmdsn));
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
			      io_task->pwrb_handle->wrb_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
			      io_task->psgl_handle->sgl_index);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
			      task->data_count);
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
			      io_task->pwrb_handle->wrb_index);
		if (io_task->pwrb_context->plast_wrb)
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
				      io_task->pwrb_context->plast_wrb,
				      io_task->pwrb_handle->wrb_index);
		io_task->pwrb_context->plast_wrb = pwrb;

		pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
	}


	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
			ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
			if (is_chip_be2_be3r(phba))
				AMAP_SET_BITS(struct amap_iscsi_wrb,
					      dmsg, pwrb, 1);
			else
				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
					      dmsg, pwrb, 1);
		} else {
			ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
			if (is_chip_be2_be3r(phba))
				AMAP_SET_BITS(struct amap_iscsi_wrb,
					      dmsg, pwrb, 0);
			else
				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
					      dmsg, pwrb, 0);
		}
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
		ret = hwi_write_buffer(pwrb, task);
		break;

	default:
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
			    "BM_%d : opcode =%d Not supported\n",
			    task->hdr->opcode & ISCSI_OPCODE_MASK);

		return -EINVAL;
	}

	if (ret)
		return ret;

	/* Set the task type */
	io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
		AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
		AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);

	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va +
		  beiscsi_conn->doorbell_offset);
	return 0;
}

static int beiscsi_task_xmit(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct beiscsi_hba *phba;
	struct scatterlist *sg;
	int num_sg;
	unsigned int writedir = 0, xferlen = 0;

	phba = io_task->conn->phba;
	/**
	 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be
	 * operational if FW still gets heartbeat from EP FW. Is management
	 * path really needed to continue further?
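	 * For now both IO and mgmt submissions are failed with -EIO once
	 * the online bit is cleared.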
	 */
	if (!beiscsi_hba_is_online(phba))
		return -EIO;

	if (!io_task->conn->login_in_progress)
		task->hdr->exp_statsn = 0;

	if (!sc)
		return beiscsi_mtask(task);

	io_task->scsi_cmnd = sc;
	io_task->num_sg = 0;
	num_sg = scsi_dma_map(sc);
	if (num_sg < 0) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
			    "BM_%d : scsi_dma_map Failed "
			    "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
			    be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
			    io_task->libiscsi_itt, scsi_bufflen(sc));

		return num_sg;
	}
	/**
	 * For scsi cmd task, check num_sg before unmapping in cleanup_task.
	 * For management task, cleanup_task checks mtask_addr before unmapping.
	 */
	io_task->num_sg = num_sg;
	xferlen = scsi_bufflen(sc);
	sg = scsi_sglist(sc);
	if (sc->sc_data_direction == DMA_TO_DEVICE)
		writedir = 1;
	else
		writedir = 0;

	return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
}

/**
 * beiscsi_bsg_request - handle bsg request from ISCSI transport
 * @job: job to handle
 */
static int beiscsi_bsg_request(struct bsg_job *job)
{
	struct Scsi_Host *shost;
	struct beiscsi_hba *phba;
	struct iscsi_bsg_request *bsg_req = job->request;
	int rc = -EINVAL;
	unsigned int tag;
	struct be_dma_mem nonemb_cmd;
	struct be_cmd_resp_hdr *resp;
	struct iscsi_bsg_reply *bsg_reply = job->reply;
	unsigned short status, extd_status;

	shost = iscsi_job_to_shost(job);
	phba = iscsi_host_priv(shost);

	if (!beiscsi_hba_is_online(phba)) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : HBA in error 0x%lx\n", phba->state);
		return -ENXIO;
	}

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
					job->request_payload.payload_len,
					&nonemb_cmd.dma, GFP_KERNEL);
		if (nonemb_cmd.va == NULL) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
				    "BM_%d : Failed to allocate memory for "
				    "beiscsi_bsg_request\n");
			return -ENOMEM;
		}
		tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
						  &nonemb_cmd);
		if (!tag) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
				    "BM_%d : MBX Tag Allocation Failed\n");

			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
					  nonemb_cmd.va, nonemb_cmd.dma);
			return -EAGAIN;
		}

		rc = wait_event_interruptible_timeout(
					phba->ctrl.mcc_wait[tag],
					phba->ctrl.mcc_tag_status[tag],
					msecs_to_jiffies(
					BEISCSI_HOST_MBX_TIMEOUT));

		if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
			clear_bit(MCC_TAG_STATE_RUNNING,
				  &phba->ctrl.ptag_state[tag].tag_state);
			dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
					  nonemb_cmd.va, nonemb_cmd.dma);
			return -EIO;
		}
		extd_status = (phba->ctrl.mcc_tag_status[tag] &
			       CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
		status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
		free_mcc_wrb(&phba->ctrl, tag);
		resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    nonemb_cmd.va, (resp->response_length
				    + sizeof(*resp)));
		bsg_reply->reply_payload_rcv_len = resp->response_length;
		bsg_reply->result = status;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
				  nonemb_cmd.va, nonemb_cmd.dma);
		if (status || extd_status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
				    "BM_%d : MBX Cmd Failed"
				    " status = %d extd_status = %d\n",
				    status, extd_status);

			return -EIO;
		} else {
			rc = 0;
		}
		break;

	default:
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
			    "BM_%d : Unsupported bsg command: 0x%x\n",
			    bsg_req->msgcode);
		break;
	}

	return rc;
}

static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
{
	/* Set the logging parameter */
	beiscsi_log_enable_init(phba, beiscsi_log_enable);
}

void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
{
	if (phba->boot_struct.boot_kset)
		return;

	/* skip if boot work is already in progress */
	if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state))
		return;

	phba->boot_struct.retry = 3;
	phba->boot_struct.tag = 0;
	phba->boot_struct.s_handle = s_handle;
	phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE;
	schedule_work(&phba->boot_work);
}

#define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS	3
/*
 * beiscsi_show_boot_tgt_info()
 * Boot flag info for iscsi-utilities
 * Bit 0 Block valid flag
 * Bit 1 Firmware booting selected
 */
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc = -EPERM;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			     (int)strlen(boot_sess->target_name),
			     (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4)
			rc = sprintf(buf, "%pI4\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		else
			rc = sprintf(str, "%pI6\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;

	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc = -EPERM;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n",
			     phba->boot_struct.boot_sess.initiator_iscsiname);
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc = -EPERM;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS);
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(str, phba);
		break;
	}
	return rc;
}

static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
	umode_t rc = 0;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
		rc = S_IRUGO;
		break;
	}
	return rc;
}

static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
	umode_t rc = 0;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	}
	return rc;
}

static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
	umode_t rc = 0;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	}
	return rc;
}

static void beiscsi_boot_kobj_release(void *data)
{
	struct beiscsi_hba *phba = data;

	scsi_host_put(phba->shost);
}

static int beiscsi_boot_create_kset(struct beiscsi_hba *phba)
{
	struct boot_struct *bs = &phba->boot_struct;
	struct iscsi_boot_kobj *boot_kobj;

	if (bs->boot_kset) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d: boot_kset already created\n");
		return 0;
	}

	bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
	if (!bs->boot_kset) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d: boot_kset alloc failed\n");
		return -ENOMEM;
	}

	/* get shost ref because the show function will refer phba */
	if (!scsi_host_get(phba->shost))
		goto free_kset;

	boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba,
					     beiscsi_show_boot_tgt_info,
					     beiscsi_tgt_get_attr_visibility,
					     beiscsi_boot_kobj_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;

	boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba,
						beiscsi_show_boot_ini_info,
						beiscsi_ini_get_attr_visibility,
						beiscsi_boot_kobj_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;

	boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba,
					       beiscsi_show_boot_eth_info,
					       beiscsi_eth_get_attr_visibility,
					       beiscsi_boot_kobj_release);
	if (!boot_kobj)
		goto put_shost;

	return 0;

put_shost:
	scsi_host_put(phba->shost);
free_kset:
	iscsi_boot_destroy_kset(bs->boot_kset);
	bs->boot_kset = NULL;
	return -ENOMEM;
}

static void beiscsi_boot_work(struct work_struct *work)
{
	struct beiscsi_hba *phba =
		container_of(work, struct beiscsi_hba, boot_work);
	struct boot_struct *bs = &phba->boot_struct;
	unsigned int tag = 0;

	if (!beiscsi_hba_is_online(phba))
		return;

	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BM_%d : %s action %d\n",
		    __func__, phba->boot_struct.action);

	switch (phba->boot_struct.action) {
	case BEISCSI_BOOT_REOPEN_SESS:
		tag = beiscsi_boot_reopen_sess(phba);
		break;
	case BEISCSI_BOOT_GET_SHANDLE:
		tag = __beiscsi_boot_get_shandle(phba, 1);
		break;
	case BEISCSI_BOOT_GET_SINFO:
		tag = beiscsi_boot_get_sinfo(phba);
		break;
	case BEISCSI_BOOT_LOGOUT_SESS:
		tag = beiscsi_boot_logout_sess(phba);
		break;
	case BEISCSI_BOOT_CREATE_KSET:
		beiscsi_boot_create_kset(phba);
		/**
		 * updated boot_kset is made visible to all before
		 * ending the boot work.
		 */
		mb();
		clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
		return;
	}
	if (!tag) {
		if (bs->retry--)
			schedule_work(&phba->boot_work);
		else
			clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
	}
}

static void beiscsi_eqd_update_work(struct work_struct *work)
{
	struct hwi_context_memory *phwi_context;
	struct be_set_eqd set_eqd[MAX_CPUS];
	struct hwi_controller *phwi_ctrlr;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba;
	unsigned int pps, delta;
	struct be_aic_obj *aic;
	int eqd, i, num = 0;
	unsigned long now;

	phba = container_of(work, struct beiscsi_hba, eqd_update.work);
	if (!beiscsi_hba_is_online(phba))
		return;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i <= phba->num_cpus; i++) {
		aic = &phba->aic_obj[i];
		pbe_eq = &phwi_context->be_eq[i];
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    pbe_eq->cq_count < aic->eq_prev) {
			aic->jiffies = now;
			aic->eq_prev = pbe_eq->cq_count;
			continue;
		}
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
		eqd = (pps / 1500) << 2;

		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX);
		eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN);

		aic->jiffies = now;
		aic->eq_prev = pbe_eq->cq_count;

		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65) / 100;
			set_eqd[num].eq_id = pbe_eq->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}
	if (num)
		/* completion of this is ignored */
		beiscsi_modify_eq_delay(phba, set_eqd, num);

	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
}

static void beiscsi_hw_tpe_check(struct timer_list *t)
{
	struct beiscsi_hba *phba = from_timer(phba, t, hw_check);
	u32 wait;

	/* if not TPE, do nothing */
	if (!beiscsi_detect_tpe(phba))
		return;

	/* wait default 4000ms before recovering */
	wait = 4000;
	if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
		wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
	queue_delayed_work(phba->wq, &phba->recover_port,
			   msecs_to_jiffies(wait));
}

static void beiscsi_hw_health_check(struct timer_list *t)
{
	struct beiscsi_hba *phba = from_timer(phba, t, hw_check);

	if (beiscsi_detect_ue(phba)) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : port in error: %lx\n", phba->state);
		/* sessions are no longer valid, so first fail the sessions */
		queue_work(phba->wq, &phba->sess_work);

		/* detect UER supported */
		if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
			return;
		/* modify this timer to check TPE */
		phba->hw_check.function = beiscsi_hw_tpe_check;
	}

	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
}

/*
 * beiscsi_enable_port()- Enables the disabled port.
 * Only port resources freed in disable function are reallocated.
 * This is called in HBA error handling path.
 *
 * @phba: Instance of driver private structure
 *
 **/
static int beiscsi_enable_port(struct beiscsi_hba *phba)
{
	struct hwi_context_memory *phwi_context;
	struct hwi_controller *phwi_ctrlr;
	struct be_eq_obj *pbe_eq;
	int ret, i;

	if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : %s : port is online %lx\n",
			      __func__, phba->state);
		return 0;
	}

	ret = beiscsi_init_sliport(phba);
	if (ret)
		return ret;

	be2iscsi_enable_msix(phba);

	beiscsi_get_params(phba);
	beiscsi_set_host_data(phba);
	/* Re-enable UER. If different TPE occurs then it is recoverable. */
	beiscsi_set_uer_feature(phba);

	phba->shost->max_id = phba->params.cxns_per_ctrl - 1;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : init port failed\n");
		goto disable_msix;
	}

	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
	}

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
	}

	i = (phba->pcidev->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : setup IRQs failed %d\n", ret);
		goto cleanup_port;
	}
	hwi_enable_intr(phba);
	/* port operational: clear all error bits */
	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
	__beiscsi_log(phba, KERN_INFO,
		      "BM_%d : port online: 0x%lx\n", phba->state);

	/* start hw_check timer and eqd_update work */
	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));

	/**
	 * Timer function gets modified for TPE detection.
	 * Always reinit to do health check first.
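	 * beiscsi_hw_health_check() switches the function to
	 * beiscsi_hw_tpe_check() once a UE is detected on a UER-capable
	 * adapter.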
	 */
	phba->hw_check.function = beiscsi_hw_health_check;
	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
	return 0;

cleanup_port:
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	hwi_cleanup_port(phba);

disable_msix:
	pci_free_irq_vectors(phba->pcidev);
	return ret;
}

/*
 * beiscsi_disable_port()- Disable port and cleanup driver resources.
 * This is called in HBA error handling and driver removal.
 * @phba: Instance Priv structure
 * @unload: indicate driver is unloading
 *
 * Free the OS and HW resources held by the driver
 **/
static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
{
	struct hwi_context_memory *phwi_context;
	struct hwi_controller *phwi_ctrlr;
	struct be_eq_obj *pbe_eq;
	unsigned int i;

	if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
		return;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	beiscsi_free_irqs(phba);
	pci_free_irq_vectors(phba->pcidev);

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	cancel_delayed_work_sync(&phba->eqd_update);
	cancel_work_sync(&phba->boot_work);
	/* WQ might be running; cancel queued mcc_work if we are not exiting */
	if (!unload && beiscsi_hba_in_error(phba)) {
		pbe_eq = &phwi_context->be_eq[i];
		cancel_work_sync(&pbe_eq->mcc_work);
	}
	hwi_cleanup_port(phba);
	beiscsi_cleanup_port(phba);
}

static void beiscsi_sess_work(struct work_struct *work)
{
	struct beiscsi_hba *phba;

	phba = container_of(work, struct beiscsi_hba, sess_work);
	/*
	 * This work gets scheduled only in case of HBA error.
	 * Old sessions are gone so need to be re-established.
	 * iscsi_session_failure needs process context hence this work.
	 */
	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
}

static void beiscsi_recover_port(struct work_struct *work)
{
	struct beiscsi_hba *phba;

	phba = container_of(work, struct beiscsi_hba, recover_port.work);
	beiscsi_disable_port(phba, 0);
	beiscsi_enable_port(phba);
}

static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
		pci_channel_state_t state)
{
	struct beiscsi_hba *phba = NULL;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
	set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : EEH error detected\n");

	/* first stop UE detection when PCI error detected */
	del_timer_sync(&phba->hw_check);
	cancel_delayed_work_sync(&phba->recover_port);

	/* sessions are no longer valid, so first fail the sessions */
	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
	beiscsi_disable_port(phba, 0);

	if (state == pci_channel_io_perm_failure) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : EEH : State PERM Failure\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 **/
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
{
	struct beiscsi_hba *phba = NULL;
	int status = 0;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
		    "BM_%d : EEH Reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = beiscsi_check_fw_rdy(phba);
	if (status) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : EEH Reset Completed\n");
	} else {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BM_%d : EEH Reset Completion Failure\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

static void beiscsi_eeh_resume(struct pci_dev *pdev)
{
	struct beiscsi_hba *phba;
	int ret;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
	pci_save_state(pdev);

	ret = beiscsi_enable_port(phba);
	if (ret)
		__beiscsi_log(phba, KERN_ERR,
			      "BM_%d : AER EEH resume failed\n");
}

static int beiscsi_dev_probe(struct pci_dev *pcidev,
			     const struct pci_device_id *id)
{
	struct hwi_context_memory *phwi_context;
	struct hwi_controller *phwi_ctrlr;
	struct beiscsi_hba *phba = NULL;
	struct be_eq_obj *pbe_eq;
	unsigned int s_handle;
	char wq_name[20];
	int ret, i;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev,
			"beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
		ret = -ENOMEM;
		goto disable_pci;
	}

	pci_save_state(pcidev);

	/* Initialize Driver configuration Parameters */
	beiscsi_hba_attrs_init(phba);

	phba->mac_addr_set = false;

	switch (pcidev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
	case OC_DEVICE_ID2:
		phba->generation = BE_GEN2;
		phba->iotask_fn = beiscsi_iotask;
		dev_warn(&pcidev->dev,
			 "Obsolete/Unsupported BE2 Adapter Family\n");
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID3:
		phba->generation = BE_GEN3;
		phba->iotask_fn = beiscsi_iotask;
		break;
	case OC_SKH_ID1:
		phba->generation = BE_GEN4;
		phba->iotask_fn = beiscsi_iotask_v2;
		break;
	default:
		phba->generation = 0;
	}

	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_ctrl_init failed\n");
		goto free_hba;
	}

	ret = beiscsi_init_sliport(phba);
	if (ret)
		goto free_hba;

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->async_pdu_lock);
	ret = beiscsi_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Error getting fw config\n");
		goto free_port;
	}
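	/*
	 * FW config is valid at this point: pull the port identity and
	 * tunable parameters before sizing the SCSI host and enabling
	 * MSI-X vectors.
	 */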
	beiscsi_get_port_name(&phba->ctrl, phba);
	beiscsi_get_params(phba);
	beiscsi_set_host_data(phba);
	beiscsi_set_uer_feature(phba);

	be2iscsi_enable_msix(phba);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BM_%d : num_cpus = %d\n",
		    phba->num_cpus);

	phba->shost->max_id = phba->params.cxns_per_ctrl;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_get_memory(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : alloc host mem failed\n");
		goto free_port;
	}

	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : init port failed\n");
		beiscsi_free_mem(phba);
		goto free_port;
	}

	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_tag_status[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
		       sizeof(struct be_dma_mem));
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
	if (!phba->wq) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to allocate work queue\n");
		ret = -ENOMEM;
		goto free_twq;
	}

	INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
	}

	i = (phba->pcidev->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to beiscsi_init_irqs\n");
		goto disable_iopoll;
	}
	hwi_enable_intr(phba);

	ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
	if (ret)
		goto free_irqs;

	/* set online bit after port is operational */
	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
	__beiscsi_log(phba, KERN_INFO,
		      "BM_%d : port online: 0x%lx\n", phba->state);

	INIT_WORK(&phba->boot_work, beiscsi_boot_work);
	ret = beiscsi_boot_get_shandle(phba, &s_handle);
	if (ret > 0) {
		beiscsi_start_boot_work(phba, s_handle);
		/**
		 * Set this bit after starting the work to let
		 * probe handle it first.
		 * An ASYNC event can schedule this work too.
		 */
		set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
	}

	beiscsi_iface_create_default(phba);
	schedule_delayed_work(&phba->eqd_update,
			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));

	INIT_WORK(&phba->sess_work, beiscsi_sess_work);
	INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
	/**
	 * Start UE detection here. UE before this will cause stall in probe
	 * and eventually fail the probe.
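	 * The same timer is repurposed for TPE checks later; see
	 * beiscsi_hw_health_check().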
	/**
	 * Start UE detection here. A UE that occurs before this point
	 * would stall the probe and eventually fail it.
	 */
	timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0);
	mod_timer(&phba->hw_check,
		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_irqs:
	hwi_disable_intr(phba);
	beiscsi_free_irqs(phba);
disable_iopoll:
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		irq_poll_disable(&pbe_eq->iopoll);
	}
	destroy_workqueue(phba->wq);
free_twq:
	hwi_cleanup_port(phba);
	beiscsi_cleanup_port(phba);
	beiscsi_free_mem(phba);
free_port:
	dma_free_coherent(&phba->pcidev->dev,
			  phba->ctrl.mbox_mem_alloced.size,
			  phba->ctrl.mbox_mem_alloced.va,
			  phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
free_hba:
	pci_disable_msix(phba->pcidev);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_set_drvdata(pcidev, NULL);
disable_pci:
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
	return ret;
}

static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	/* First stop UE detection before unloading. */
	del_timer_sync(&phba->hw_check);
	cancel_delayed_work_sync(&phba->recover_port);
	cancel_work_sync(&phba->sess_work);

	beiscsi_iface_destroy_default(phba);
	iscsi_host_remove(phba->shost, false);
	beiscsi_disable_port(phba, 1);

	/* Destroy the boot kset only after boot_work has been cancelled. */
	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);

	/* Free all resources. */
	destroy_workqueue(phba->wq);
	beiscsi_free_mem(phba);

	/* Controller uninit. */
	beiscsi_unmap_pci_function(phba);
	dma_free_coherent(&phba->pcidev->dev,
			  phba->ctrl.mbox_mem_alloced.size,
			  phba->ctrl.mbox_mem_alloced.va,
			  phba->ctrl.mbox_mem_alloced.dma);

	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	pci_set_drvdata(pcidev, NULL);
	pci_release_regions(pcidev);
	pci_disable_device(pcidev);
}

static struct pci_error_handlers beiscsi_eeh_handlers = {
	.error_detected = beiscsi_eeh_err_detected,
	.slot_reset = beiscsi_eeh_reset,
	.resume = beiscsi_eeh_resume,
};
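/*
 * EEH/AER recovery flow: error_detected quiesces the device (waiting out
 * any flash dump on the first function, as above), slot_reset re-enables
 * the PCI function and waits for firmware readiness, and resume saves the
 * restored PCI state and brings the port back online.
 */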
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.unbind_conn = iscsi_conn_unbind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = beiscsi_attr_is_visible,
	.set_iface_param = beiscsi_iface_set_param,
	.get_iface_param = beiscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};

static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table,
	.err_handler = &beiscsi_eeh_handlers
};

static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
		iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi transport.\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
	       &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);
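/*
 * Registration order matters: the iSCSI transport is registered before
 * the PCI driver so it is available by the time probe runs, and module
 * exit unwinds the two in reverse order.
 */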