1 /* 2 * This file is part of the Emulex Linux Device Driver for Enterprise iSCSI 3 * Host Bus Adapters. Refer to the README file included with this package 4 * for driver version and adapter compatibility. 5 * 6 * Copyright (c) 2018 Broadcom. All Rights Reserved. 7 * The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 8 * 9 * This program is free software; you can redistribute it and/or modify it 10 * under the terms of version 2 of the GNU General Public License as published 11 * by the Free Software Foundation. 12 * 13 * This program is distributed in the hope that it will be useful. ALL EXPRESS 14 * OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY 15 * IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, 16 * OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH 17 * DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. 18 * See the GNU General Public License for more details, a copy of which 19 * can be found in the file COPYING included with this package. 20 * 21 * Contact Information: 22 * linux-drivers@broadcom.com 23 * 24 */ 25 26 #include <linux/reboot.h> 27 #include <linux/delay.h> 28 #include <linux/slab.h> 29 #include <linux/interrupt.h> 30 #include <linux/blkdev.h> 31 #include <linux/pci.h> 32 #include <linux/string.h> 33 #include <linux/kernel.h> 34 #include <linux/semaphore.h> 35 #include <linux/iscsi_boot_sysfs.h> 36 #include <linux/module.h> 37 #include <linux/bsg-lib.h> 38 #include <linux/irq_poll.h> 39 40 #include <scsi/libiscsi.h> 41 #include <scsi/scsi_bsg_iscsi.h> 42 #include <scsi/scsi_netlink.h> 43 #include <scsi/scsi_transport_iscsi.h> 44 #include <scsi/scsi_transport.h> 45 #include <scsi/scsi_cmnd.h> 46 #include <scsi/scsi_device.h> 47 #include <scsi/scsi_host.h> 48 #include <scsi/scsi.h> 49 #include "be_main.h" 50 #include "be_iscsi.h" 51 #include "be_mgmt.h" 52 #include "be_cmds.h" 53 54 static unsigned int be_iopoll_budget = 10; 55 static unsigned int be_max_phys_size = 64; 56 static unsigned int enable_msix = 1; 57 58 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 59 MODULE_VERSION(BUILD_STR); 60 MODULE_AUTHOR("Emulex Corporation"); 61 MODULE_LICENSE("GPL"); 62 module_param(be_iopoll_budget, int, 0); 63 module_param(enable_msix, int, 0); 64 module_param(be_max_phys_size, uint, S_IRUGO); 65 MODULE_PARM_DESC(be_max_phys_size, 66 "Maximum Size (In Kilobytes) of physically contiguous " 67 "memory that can be allocated. Range is 16 - 128"); 68 69 #define beiscsi_disp_param(_name)\ 70 static ssize_t \ 71 beiscsi_##_name##_disp(struct device *dev,\ 72 struct device_attribute *attrib, char *buf) \ 73 { \ 74 struct Scsi_Host *shost = class_to_shost(dev);\ 75 struct beiscsi_hba *phba = iscsi_host_priv(shost); \ 76 return snprintf(buf, PAGE_SIZE, "%d\n",\ 77 phba->attr_##_name);\ 78 } 79 80 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ 81 static int \ 82 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ 83 {\ 84 if (val >= _minval && val <= _maxval) {\ 85 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 86 "BA_%d : beiscsi_"#_name" updated "\ 87 "from 0x%x ==> 0x%x\n",\ 88 phba->attr_##_name, val); \ 89 phba->attr_##_name = val;\ 90 return 0;\ 91 } \ 92 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ 93 "BA_%d beiscsi_"#_name" attribute "\ 94 "cannot be updated to 0x%x, "\ 95 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 96 return -EINVAL;\ 97 } 98 99 #define beiscsi_store_param(_name) \ 100 static ssize_t \ 101 beiscsi_##_name##_store(struct device *dev,\ 102 struct device_attribute *attr, const char *buf,\ 103 size_t count) \ 104 { \ 105 struct Scsi_Host *shost = class_to_shost(dev);\ 106 struct beiscsi_hba *phba = iscsi_host_priv(shost);\ 107 uint32_t param_val = 0;\ 108 if (!isdigit(buf[0]))\ 109 return -EINVAL;\ 110 if (sscanf(buf, "%i", ¶m_val) != 1)\ 111 return -EINVAL;\ 112 if (beiscsi_##_name##_change(phba, param_val) == 0) \ 113 return strlen(buf);\ 114 else \ 115 return -EINVAL;\ 116 } 117 118 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ 119 static int \ 120 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ 121 { \ 122 if (val >= _minval && val <= _maxval) {\ 123 phba->attr_##_name = val;\ 124 return 0;\ 125 } \ 126 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 127 "BA_%d beiscsi_"#_name" attribute " \ 128 "cannot be updated to 0x%x, "\ 129 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 130 phba->attr_##_name = _defval;\ 131 return -EINVAL;\ 132 } 133 134 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ 135 static uint beiscsi_##_name = _defval;\ 136 module_param(beiscsi_##_name, uint, S_IRUGO);\ 137 MODULE_PARM_DESC(beiscsi_##_name, _descp);\ 138 beiscsi_disp_param(_name)\ 139 beiscsi_change_param(_name, _minval, _maxval, _defval)\ 140 beiscsi_store_param(_name)\ 141 beiscsi_init_param(_name, _minval, _maxval, _defval)\ 142 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ 143 beiscsi_##_name##_disp, beiscsi_##_name##_store) 144 145 /* 146 * When new log level added update the 147 * the MAX allowed value for log_enable 148 */ 149 BEISCSI_RW_ATTR(log_enable, 0x00, 150 0xFF, 0x00, "Enable logging Bit Mask\n" 151 "\t\t\t\tInitialization Events : 0x01\n" 152 "\t\t\t\tMailbox Events : 0x02\n" 153 "\t\t\t\tMiscellaneous Events : 0x04\n" 154 "\t\t\t\tError Handling : 0x08\n" 155 "\t\t\t\tIO Path Events : 0x10\n" 156 "\t\t\t\tConfiguration Path : 0x20\n" 157 "\t\t\t\tiSCSI Protocol : 0x40\n"); 158 159 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 160 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 161 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); 162 DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); 163 DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, 164 beiscsi_active_session_disp, NULL); 165 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, 166 beiscsi_free_session_disp, NULL); 167 struct device_attribute *beiscsi_attrs[] = { 168 &dev_attr_beiscsi_log_enable, 169 &dev_attr_beiscsi_drvr_ver, 170 &dev_attr_beiscsi_adapter_family, 171 &dev_attr_beiscsi_fw_ver, 172 &dev_attr_beiscsi_active_session_count, 173 &dev_attr_beiscsi_free_session_count, 174 &dev_attr_beiscsi_phys_port, 175 NULL, 176 }; 177 178 static char const *cqe_desc[] = { 179 "RESERVED_DESC", 180 "SOL_CMD_COMPLETE", 181 "SOL_CMD_KILLED_DATA_DIGEST_ERR", 182 "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL", 183 "CXN_KILLED_BURST_LEN_MISMATCH", 184 "CXN_KILLED_AHS_RCVD", 185 "CXN_KILLED_HDR_DIGEST_ERR", 186 "CXN_KILLED_UNKNOWN_HDR", 187 "CXN_KILLED_STALE_ITT_TTT_RCVD", 188 "CXN_KILLED_INVALID_ITT_TTT_RCVD", 189 "CXN_KILLED_RST_RCVD", 190 "CXN_KILLED_TIMED_OUT", 191 "CXN_KILLED_RST_SENT", 192 "CXN_KILLED_FIN_RCVD", 193 "CXN_KILLED_BAD_UNSOL_PDU_RCVD", 194 "CXN_KILLED_BAD_WRB_INDEX_ERROR", 195 "CXN_KILLED_OVER_RUN_RESIDUAL", 196 "CXN_KILLED_UNDER_RUN_RESIDUAL", 197 "CMD_KILLED_INVALID_STATSN_RCVD", 198 "CMD_KILLED_INVALID_R2T_RCVD", 199 "CMD_CXN_KILLED_LUN_INVALID", 200 "CMD_CXN_KILLED_ICD_INVALID", 201 "CMD_CXN_KILLED_ITT_INVALID", 202 "CMD_CXN_KILLED_SEQ_OUTOFORDER", 203 "CMD_CXN_KILLED_INVALID_DATASN_RCVD", 204 "CXN_INVALIDATE_NOTIFY", 205 "CXN_INVALIDATE_INDEX_NOTIFY", 206 "CMD_INVALIDATED_NOTIFY", 207 "UNSOL_HDR_NOTIFY", 208 "UNSOL_DATA_NOTIFY", 209 "UNSOL_DATA_DIGEST_ERROR_NOTIFY", 210 "DRIVERMSG_NOTIFY", 211 "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN", 212 "SOL_CMD_KILLED_DIF_ERR", 213 "CXN_KILLED_SYN_RCVD", 214 "CXN_KILLED_IMM_DATA_RCVD" 215 }; 216 217 static int beiscsi_slave_configure(struct scsi_device *sdev) 218 { 219 blk_queue_max_segment_size(sdev->request_queue, 65536); 220 return 0; 221 } 222 223 static int beiscsi_eh_abort(struct scsi_cmnd *sc) 224 { 225 struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr; 226 struct iscsi_cls_session *cls_session; 227 struct beiscsi_io_task *abrt_io_task; 228 struct beiscsi_conn *beiscsi_conn; 229 struct iscsi_session *session; 230 struct invldt_cmd_tbl inv_tbl; 231 struct beiscsi_hba *phba; 232 struct iscsi_conn *conn; 233 int rc; 234 235 cls_session = starget_to_session(scsi_target(sc->device)); 236 session = cls_session->dd_data; 237 238 /* check if we raced, task just got cleaned up under us */ 239 spin_lock_bh(&session->back_lock); 240 if (!abrt_task || !abrt_task->sc) { 241 spin_unlock_bh(&session->back_lock); 242 return SUCCESS; 243 } 244 /* get a task ref till FW processes the req for the ICD used */ 245 __iscsi_get_task(abrt_task); 246 abrt_io_task = abrt_task->dd_data; 247 conn = abrt_task->conn; 248 beiscsi_conn = conn->dd_data; 249 phba = beiscsi_conn->phba; 250 /* mark WRB invalid which have been not processed by FW yet */ 251 if (is_chip_be2_be3r(phba)) { 252 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 253 abrt_io_task->pwrb_handle->pwrb, 1); 254 } else { 255 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld, 256 abrt_io_task->pwrb_handle->pwrb, 1); 257 } 258 inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid; 259 inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index; 260 spin_unlock_bh(&session->back_lock); 261 262 rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1); 263 iscsi_put_task(abrt_task); 264 if (rc) { 265 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 266 "BM_%d : sc %p invalidation failed %d\n", 267 sc, rc); 268 return FAILED; 269 } 270 271 return iscsi_eh_abort(sc); 272 } 273 274 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) 275 { 276 struct beiscsi_invldt_cmd_tbl { 277 struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ]; 278 struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ]; 279 } *inv_tbl; 280 struct iscsi_cls_session *cls_session; 281 struct beiscsi_conn *beiscsi_conn; 282 struct beiscsi_io_task *io_task; 283 struct iscsi_session *session; 284 struct beiscsi_hba *phba; 285 struct iscsi_conn *conn; 286 struct iscsi_task *task; 287 unsigned int i, nents; 288 int rc, more = 0; 289 290 cls_session = starget_to_session(scsi_target(sc->device)); 291 session = cls_session->dd_data; 292 293 spin_lock_bh(&session->frwd_lock); 294 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { 295 spin_unlock_bh(&session->frwd_lock); 296 return FAILED; 297 } 298 299 conn = session->leadconn; 300 beiscsi_conn = conn->dd_data; 301 phba = beiscsi_conn->phba; 302 303 inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC); 304 if (!inv_tbl) { 305 spin_unlock_bh(&session->frwd_lock); 306 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 307 "BM_%d : invldt_cmd_tbl alloc failed\n"); 308 return FAILED; 309 } 310 nents = 0; 311 /* take back_lock to prevent task from getting cleaned up under us */ 312 spin_lock(&session->back_lock); 313 for (i = 0; i < conn->session->cmds_max; i++) { 314 task = conn->session->cmds[i]; 315 if (!task->sc) 316 continue; 317 318 if (sc->device->lun != task->sc->device->lun) 319 continue; 320 /** 321 * Can't fit in more cmds? Normally this won't happen b'coz 322 * BEISCSI_CMD_PER_LUN is same as BE_INVLDT_CMD_TBL_SZ. 323 */ 324 if (nents == BE_INVLDT_CMD_TBL_SZ) { 325 more = 1; 326 break; 327 } 328 329 /* get a task ref till FW processes the req for the ICD used */ 330 __iscsi_get_task(task); 331 io_task = task->dd_data; 332 /* mark WRB invalid which have been not processed by FW yet */ 333 if (is_chip_be2_be3r(phba)) { 334 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 335 io_task->pwrb_handle->pwrb, 1); 336 } else { 337 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld, 338 io_task->pwrb_handle->pwrb, 1); 339 } 340 341 inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid; 342 inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index; 343 inv_tbl->task[nents] = task; 344 nents++; 345 } 346 spin_unlock(&session->back_lock); 347 spin_unlock_bh(&session->frwd_lock); 348 349 rc = SUCCESS; 350 if (!nents) 351 goto end_reset; 352 353 if (more) { 354 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 355 "BM_%d : number of cmds exceeds size of invalidation table\n"); 356 rc = FAILED; 357 goto end_reset; 358 } 359 360 if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) { 361 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 362 "BM_%d : cid %u scmds invalidation failed\n", 363 beiscsi_conn->beiscsi_conn_cid); 364 rc = FAILED; 365 } 366 367 end_reset: 368 for (i = 0; i < nents; i++) 369 iscsi_put_task(inv_tbl->task[i]); 370 kfree(inv_tbl); 371 372 if (rc == SUCCESS) 373 rc = iscsi_eh_device_reset(sc); 374 return rc; 375 } 376 377 /*------------------- PCI Driver operations and data ----------------- */ 378 static const struct pci_device_id beiscsi_pci_id_table[] = { 379 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 380 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 381 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 382 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 383 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, 384 { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) }, 385 { 0 } 386 }; 387 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 388 389 390 static struct scsi_host_template beiscsi_sht = { 391 .module = THIS_MODULE, 392 .name = "Emulex 10Gbe open-iscsi Initiator Driver", 393 .proc_name = DRV_NAME, 394 .queuecommand = iscsi_queuecommand, 395 .change_queue_depth = scsi_change_queue_depth, 396 .slave_configure = beiscsi_slave_configure, 397 .target_alloc = iscsi_target_alloc, 398 .eh_timed_out = iscsi_eh_cmd_timed_out, 399 .eh_abort_handler = beiscsi_eh_abort, 400 .eh_device_reset_handler = beiscsi_eh_device_reset, 401 .eh_target_reset_handler = iscsi_eh_session_reset, 402 .shost_attrs = beiscsi_attrs, 403 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 404 .can_queue = BE2_IO_DEPTH, 405 .this_id = -1, 406 .max_sectors = BEISCSI_MAX_SECTORS, 407 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 408 .use_clustering = ENABLE_CLUSTERING, 409 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, 410 .track_queue_depth = 1, 411 }; 412 413 static struct scsi_transport_template *beiscsi_scsi_transport; 414 415 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) 416 { 417 struct beiscsi_hba *phba; 418 struct Scsi_Host *shost; 419 420 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); 421 if (!shost) { 422 dev_err(&pcidev->dev, 423 "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); 424 return NULL; 425 } 426 shost->max_id = BE2_MAX_SESSIONS; 427 shost->max_channel = 0; 428 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; 429 shost->max_lun = BEISCSI_NUM_MAX_LUN; 430 shost->transportt = beiscsi_scsi_transport; 431 phba = iscsi_host_priv(shost); 432 memset(phba, 0, sizeof(*phba)); 433 phba->shost = shost; 434 phba->pcidev = pci_dev_get(pcidev); 435 pci_set_drvdata(pcidev, phba); 436 phba->interface_handle = 0xFFFFFFFF; 437 438 return phba; 439 } 440 441 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) 442 { 443 if (phba->csr_va) { 444 iounmap(phba->csr_va); 445 phba->csr_va = NULL; 446 } 447 if (phba->db_va) { 448 iounmap(phba->db_va); 449 phba->db_va = NULL; 450 } 451 if (phba->pci_va) { 452 iounmap(phba->pci_va); 453 phba->pci_va = NULL; 454 } 455 } 456 457 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, 458 struct pci_dev *pcidev) 459 { 460 u8 __iomem *addr; 461 int pcicfg_reg; 462 463 addr = ioremap_nocache(pci_resource_start(pcidev, 2), 464 pci_resource_len(pcidev, 2)); 465 if (addr == NULL) 466 return -ENOMEM; 467 phba->ctrl.csr = addr; 468 phba->csr_va = addr; 469 470 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); 471 if (addr == NULL) 472 goto pci_map_err; 473 phba->ctrl.db = addr; 474 phba->db_va = addr; 475 476 if (phba->generation == BE_GEN2) 477 pcicfg_reg = 1; 478 else 479 pcicfg_reg = 0; 480 481 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), 482 pci_resource_len(pcidev, pcicfg_reg)); 483 484 if (addr == NULL) 485 goto pci_map_err; 486 phba->ctrl.pcicfg = addr; 487 phba->pci_va = addr; 488 return 0; 489 490 pci_map_err: 491 beiscsi_unmap_pci_function(phba); 492 return -ENOMEM; 493 } 494 495 static int beiscsi_enable_pci(struct pci_dev *pcidev) 496 { 497 int ret; 498 499 ret = pci_enable_device(pcidev); 500 if (ret) { 501 dev_err(&pcidev->dev, 502 "beiscsi_enable_pci - enable device failed\n"); 503 return ret; 504 } 505 506 ret = pci_request_regions(pcidev, DRV_NAME); 507 if (ret) { 508 dev_err(&pcidev->dev, 509 "beiscsi_enable_pci - request region failed\n"); 510 goto pci_dev_disable; 511 } 512 513 pci_set_master(pcidev); 514 ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64)); 515 if (ret) { 516 ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)); 517 if (ret) { 518 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 519 goto pci_region_release; 520 } 521 } 522 return 0; 523 524 pci_region_release: 525 pci_release_regions(pcidev); 526 pci_dev_disable: 527 pci_disable_device(pcidev); 528 529 return ret; 530 } 531 532 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) 533 { 534 struct be_ctrl_info *ctrl = &phba->ctrl; 535 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; 536 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; 537 int status = 0; 538 539 ctrl->pdev = pdev; 540 status = beiscsi_map_pci_bars(phba, pdev); 541 if (status) 542 return status; 543 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 544 mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev, 545 mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL); 546 if (!mbox_mem_alloc->va) { 547 beiscsi_unmap_pci_function(phba); 548 return -ENOMEM; 549 } 550 551 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 552 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 553 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 554 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 555 mutex_init(&ctrl->mbox_lock); 556 spin_lock_init(&phba->ctrl.mcc_lock); 557 558 return status; 559 } 560 561 /** 562 * beiscsi_get_params()- Set the config paramters 563 * @phba: ptr device priv structure 564 **/ 565 static void beiscsi_get_params(struct beiscsi_hba *phba) 566 { 567 uint32_t total_cid_count = 0; 568 uint32_t total_icd_count = 0; 569 uint8_t ulp_num = 0; 570 571 total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + 572 BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); 573 574 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 575 uint32_t align_mask = 0; 576 uint32_t icd_post_per_page = 0; 577 uint32_t icd_count_unavailable = 0; 578 uint32_t icd_start = 0, icd_count = 0; 579 uint32_t icd_start_align = 0, icd_count_align = 0; 580 581 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 582 icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 583 icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 584 585 /* Get ICD count that can be posted on each page */ 586 icd_post_per_page = (PAGE_SIZE / (BE2_SGE * 587 sizeof(struct iscsi_sge))); 588 align_mask = (icd_post_per_page - 1); 589 590 /* Check if icd_start is aligned ICD per page posting */ 591 if (icd_start % icd_post_per_page) { 592 icd_start_align = ((icd_start + 593 icd_post_per_page) & 594 ~(align_mask)); 595 phba->fw_config. 596 iscsi_icd_start[ulp_num] = 597 icd_start_align; 598 } 599 600 icd_count_align = (icd_count & ~align_mask); 601 602 /* ICD discarded in the process of alignment */ 603 if (icd_start_align) 604 icd_count_unavailable = ((icd_start_align - 605 icd_start) + 606 (icd_count - 607 icd_count_align)); 608 609 /* Updated ICD count available */ 610 phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - 611 icd_count_unavailable); 612 613 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 614 "BM_%d : Aligned ICD values\n" 615 "\t ICD Start : %d\n" 616 "\t ICD Count : %d\n" 617 "\t ICD Discarded : %d\n", 618 phba->fw_config. 619 iscsi_icd_start[ulp_num], 620 phba->fw_config. 621 iscsi_icd_count[ulp_num], 622 icd_count_unavailable); 623 break; 624 } 625 } 626 627 total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 628 phba->params.ios_per_ctrl = (total_icd_count - 629 (total_cid_count + 630 BE2_TMFS + BE2_NOPOUT_REQ)); 631 phba->params.cxns_per_ctrl = total_cid_count; 632 phba->params.icds_per_ctrl = total_icd_count; 633 phba->params.num_sge_per_io = BE2_SGE; 634 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 635 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 636 phba->params.num_eq_entries = 1024; 637 phba->params.num_cq_entries = 1024; 638 phba->params.wrbs_per_cxn = 256; 639 } 640 641 static void hwi_ring_eq_db(struct beiscsi_hba *phba, 642 unsigned int id, unsigned int clr_interrupt, 643 unsigned int num_processed, 644 unsigned char rearm, unsigned char event) 645 { 646 u32 val = 0; 647 648 if (rearm) 649 val |= 1 << DB_EQ_REARM_SHIFT; 650 if (clr_interrupt) 651 val |= 1 << DB_EQ_CLR_SHIFT; 652 if (event) 653 val |= 1 << DB_EQ_EVNT_SHIFT; 654 655 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; 656 /* Setting lower order EQ_ID Bits */ 657 val |= (id & DB_EQ_RING_ID_LOW_MASK); 658 659 /* Setting Higher order EQ_ID Bits */ 660 val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & 661 DB_EQ_RING_ID_HIGH_MASK) 662 << DB_EQ_HIGH_SET_SHIFT); 663 664 iowrite32(val, phba->db_va + DB_EQ_OFFSET); 665 } 666 667 /** 668 * be_isr_mcc - The isr routine of the driver. 669 * @irq: Not used 670 * @dev_id: Pointer to host adapter structure 671 */ 672 static irqreturn_t be_isr_mcc(int irq, void *dev_id) 673 { 674 struct beiscsi_hba *phba; 675 struct be_eq_entry *eqe; 676 struct be_queue_info *eq; 677 struct be_queue_info *mcc; 678 unsigned int mcc_events; 679 struct be_eq_obj *pbe_eq; 680 681 pbe_eq = dev_id; 682 eq = &pbe_eq->q; 683 phba = pbe_eq->phba; 684 mcc = &phba->ctrl.mcc_obj.cq; 685 eqe = queue_tail_node(eq); 686 687 mcc_events = 0; 688 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 689 & EQE_VALID_MASK) { 690 if (((eqe->dw[offsetof(struct amap_eq_entry, 691 resource_id) / 32] & 692 EQE_RESID_MASK) >> 16) == mcc->id) { 693 mcc_events++; 694 } 695 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 696 queue_tail_inc(eq); 697 eqe = queue_tail_node(eq); 698 } 699 700 if (mcc_events) { 701 queue_work(phba->wq, &pbe_eq->mcc_work); 702 hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1); 703 } 704 return IRQ_HANDLED; 705 } 706 707 /** 708 * be_isr_msix - The isr routine of the driver. 709 * @irq: Not used 710 * @dev_id: Pointer to host adapter structure 711 */ 712 static irqreturn_t be_isr_msix(int irq, void *dev_id) 713 { 714 struct beiscsi_hba *phba; 715 struct be_queue_info *eq; 716 struct be_eq_obj *pbe_eq; 717 718 pbe_eq = dev_id; 719 eq = &pbe_eq->q; 720 721 phba = pbe_eq->phba; 722 /* disable interrupt till iopoll completes */ 723 hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1); 724 irq_poll_sched(&pbe_eq->iopoll); 725 726 return IRQ_HANDLED; 727 } 728 729 /** 730 * be_isr - The isr routine of the driver. 731 * @irq: Not used 732 * @dev_id: Pointer to host adapter structure 733 */ 734 static irqreturn_t be_isr(int irq, void *dev_id) 735 { 736 struct beiscsi_hba *phba; 737 struct hwi_controller *phwi_ctrlr; 738 struct hwi_context_memory *phwi_context; 739 struct be_eq_entry *eqe; 740 struct be_queue_info *eq; 741 struct be_queue_info *mcc; 742 unsigned int mcc_events, io_events; 743 struct be_ctrl_info *ctrl; 744 struct be_eq_obj *pbe_eq; 745 int isr, rearm; 746 747 phba = dev_id; 748 ctrl = &phba->ctrl; 749 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + 750 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); 751 if (!isr) 752 return IRQ_NONE; 753 754 phwi_ctrlr = phba->phwi_ctrlr; 755 phwi_context = phwi_ctrlr->phwi_ctxt; 756 pbe_eq = &phwi_context->be_eq[0]; 757 758 eq = &phwi_context->be_eq[0].q; 759 mcc = &phba->ctrl.mcc_obj.cq; 760 eqe = queue_tail_node(eq); 761 762 io_events = 0; 763 mcc_events = 0; 764 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 765 & EQE_VALID_MASK) { 766 if (((eqe->dw[offsetof(struct amap_eq_entry, 767 resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id) 768 mcc_events++; 769 else 770 io_events++; 771 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 772 queue_tail_inc(eq); 773 eqe = queue_tail_node(eq); 774 } 775 if (!io_events && !mcc_events) 776 return IRQ_NONE; 777 778 /* no need to rearm if interrupt is only for IOs */ 779 rearm = 0; 780 if (mcc_events) { 781 queue_work(phba->wq, &pbe_eq->mcc_work); 782 /* rearm for MCCQ */ 783 rearm = 1; 784 } 785 if (io_events) 786 irq_poll_sched(&pbe_eq->iopoll); 787 hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1); 788 return IRQ_HANDLED; 789 } 790 791 static void beiscsi_free_irqs(struct beiscsi_hba *phba) 792 { 793 struct hwi_context_memory *phwi_context; 794 int i; 795 796 if (!phba->pcidev->msix_enabled) { 797 if (phba->pcidev->irq) 798 free_irq(phba->pcidev->irq, phba); 799 return; 800 } 801 802 phwi_context = phba->phwi_ctrlr->phwi_ctxt; 803 for (i = 0; i <= phba->num_cpus; i++) { 804 free_irq(pci_irq_vector(phba->pcidev, i), 805 &phwi_context->be_eq[i]); 806 kfree(phba->msi_name[i]); 807 } 808 } 809 810 static int beiscsi_init_irqs(struct beiscsi_hba *phba) 811 { 812 struct pci_dev *pcidev = phba->pcidev; 813 struct hwi_controller *phwi_ctrlr; 814 struct hwi_context_memory *phwi_context; 815 int ret, i, j; 816 817 phwi_ctrlr = phba->phwi_ctrlr; 818 phwi_context = phwi_ctrlr->phwi_ctxt; 819 820 if (pcidev->msix_enabled) { 821 for (i = 0; i < phba->num_cpus; i++) { 822 phba->msi_name[i] = kasprintf(GFP_KERNEL, 823 "beiscsi_%02x_%02x", 824 phba->shost->host_no, i); 825 if (!phba->msi_name[i]) { 826 ret = -ENOMEM; 827 goto free_msix_irqs; 828 } 829 830 ret = request_irq(pci_irq_vector(pcidev, i), 831 be_isr_msix, 0, phba->msi_name[i], 832 &phwi_context->be_eq[i]); 833 if (ret) { 834 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 835 "BM_%d : beiscsi_init_irqs-Failed to" 836 "register msix for i = %d\n", 837 i); 838 kfree(phba->msi_name[i]); 839 goto free_msix_irqs; 840 } 841 } 842 phba->msi_name[i] = kasprintf(GFP_KERNEL, "beiscsi_mcc_%02x", 843 phba->shost->host_no); 844 if (!phba->msi_name[i]) { 845 ret = -ENOMEM; 846 goto free_msix_irqs; 847 } 848 ret = request_irq(pci_irq_vector(pcidev, i), be_isr_mcc, 0, 849 phba->msi_name[i], &phwi_context->be_eq[i]); 850 if (ret) { 851 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT , 852 "BM_%d : beiscsi_init_irqs-" 853 "Failed to register beiscsi_msix_mcc\n"); 854 kfree(phba->msi_name[i]); 855 goto free_msix_irqs; 856 } 857 858 } else { 859 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, 860 "beiscsi", phba); 861 if (ret) { 862 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 863 "BM_%d : beiscsi_init_irqs-" 864 "Failed to register irq\\n"); 865 return ret; 866 } 867 } 868 return 0; 869 free_msix_irqs: 870 for (j = i - 1; j >= 0; j--) { 871 free_irq(pci_irq_vector(pcidev, i), &phwi_context->be_eq[j]); 872 kfree(phba->msi_name[j]); 873 } 874 return ret; 875 } 876 877 void hwi_ring_cq_db(struct beiscsi_hba *phba, 878 unsigned int id, unsigned int num_processed, 879 unsigned char rearm) 880 { 881 u32 val = 0; 882 883 if (rearm) 884 val |= 1 << DB_CQ_REARM_SHIFT; 885 886 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; 887 888 /* Setting lower order CQ_ID Bits */ 889 val |= (id & DB_CQ_RING_ID_LOW_MASK); 890 891 /* Setting Higher order CQ_ID Bits */ 892 val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & 893 DB_CQ_RING_ID_HIGH_MASK) 894 << DB_CQ_HIGH_SET_SHIFT); 895 896 iowrite32(val, phba->db_va + DB_CQ_OFFSET); 897 } 898 899 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) 900 { 901 struct sgl_handle *psgl_handle; 902 unsigned long flags; 903 904 spin_lock_irqsave(&phba->io_sgl_lock, flags); 905 if (phba->io_sgl_hndl_avbl) { 906 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 907 "BM_%d : In alloc_io_sgl_handle," 908 " io_sgl_alloc_index=%d\n", 909 phba->io_sgl_alloc_index); 910 911 psgl_handle = phba->io_sgl_hndl_base[phba-> 912 io_sgl_alloc_index]; 913 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; 914 phba->io_sgl_hndl_avbl--; 915 if (phba->io_sgl_alloc_index == (phba->params. 916 ios_per_ctrl - 1)) 917 phba->io_sgl_alloc_index = 0; 918 else 919 phba->io_sgl_alloc_index++; 920 } else 921 psgl_handle = NULL; 922 spin_unlock_irqrestore(&phba->io_sgl_lock, flags); 923 return psgl_handle; 924 } 925 926 static void 927 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 928 { 929 unsigned long flags; 930 931 spin_lock_irqsave(&phba->io_sgl_lock, flags); 932 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 933 "BM_%d : In free_,io_sgl_free_index=%d\n", 934 phba->io_sgl_free_index); 935 936 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { 937 /* 938 * this can happen if clean_task is called on a task that 939 * failed in xmit_task or alloc_pdu. 940 */ 941 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 942 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, value there=%p\n", 943 phba->io_sgl_free_index, 944 phba->io_sgl_hndl_base[phba->io_sgl_free_index]); 945 spin_unlock_irqrestore(&phba->io_sgl_lock, flags); 946 return; 947 } 948 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; 949 phba->io_sgl_hndl_avbl++; 950 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1)) 951 phba->io_sgl_free_index = 0; 952 else 953 phba->io_sgl_free_index++; 954 spin_unlock_irqrestore(&phba->io_sgl_lock, flags); 955 } 956 957 static inline struct wrb_handle * 958 beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context, 959 unsigned int wrbs_per_cxn) 960 { 961 struct wrb_handle *pwrb_handle; 962 unsigned long flags; 963 964 spin_lock_irqsave(&pwrb_context->wrb_lock, flags); 965 if (!pwrb_context->wrb_handles_available) { 966 spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); 967 return NULL; 968 } 969 pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index]; 970 pwrb_context->wrb_handles_available--; 971 if (pwrb_context->alloc_index == (wrbs_per_cxn - 1)) 972 pwrb_context->alloc_index = 0; 973 else 974 pwrb_context->alloc_index++; 975 spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); 976 977 if (pwrb_handle) 978 memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb)); 979 980 return pwrb_handle; 981 } 982 983 /** 984 * alloc_wrb_handle - To allocate a wrb handle 985 * @phba: The hba pointer 986 * @cid: The cid to use for allocation 987 * @pwrb_context: ptr to ptr to wrb context 988 * 989 * This happens under session_lock until submission to chip 990 */ 991 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid, 992 struct hwi_wrb_context **pcontext) 993 { 994 struct hwi_wrb_context *pwrb_context; 995 struct hwi_controller *phwi_ctrlr; 996 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); 997 998 phwi_ctrlr = phba->phwi_ctrlr; 999 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1000 /* return the context address */ 1001 *pcontext = pwrb_context; 1002 return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn); 1003 } 1004 1005 static inline void 1006 beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context, 1007 struct wrb_handle *pwrb_handle, 1008 unsigned int wrbs_per_cxn) 1009 { 1010 unsigned long flags; 1011 1012 spin_lock_irqsave(&pwrb_context->wrb_lock, flags); 1013 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; 1014 pwrb_context->wrb_handles_available++; 1015 if (pwrb_context->free_index == (wrbs_per_cxn - 1)) 1016 pwrb_context->free_index = 0; 1017 else 1018 pwrb_context->free_index++; 1019 pwrb_handle->pio_handle = NULL; 1020 spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags); 1021 } 1022 1023 /** 1024 * free_wrb_handle - To free the wrb handle back to pool 1025 * @phba: The hba pointer 1026 * @pwrb_context: The context to free from 1027 * @pwrb_handle: The wrb_handle to free 1028 * 1029 * This happens under session_lock until submission to chip 1030 */ 1031 static void 1032 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, 1033 struct wrb_handle *pwrb_handle) 1034 { 1035 beiscsi_put_wrb_handle(pwrb_context, 1036 pwrb_handle, 1037 phba->params.wrbs_per_cxn); 1038 beiscsi_log(phba, KERN_INFO, 1039 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1040 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x" 1041 "wrb_handles_available=%d\n", 1042 pwrb_handle, pwrb_context->free_index, 1043 pwrb_context->wrb_handles_available); 1044 } 1045 1046 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 1047 { 1048 struct sgl_handle *psgl_handle; 1049 unsigned long flags; 1050 1051 spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); 1052 if (phba->eh_sgl_hndl_avbl) { 1053 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 1054 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; 1055 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1056 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n", 1057 phba->eh_sgl_alloc_index, 1058 phba->eh_sgl_alloc_index); 1059 1060 phba->eh_sgl_hndl_avbl--; 1061 if (phba->eh_sgl_alloc_index == 1062 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1063 1)) 1064 phba->eh_sgl_alloc_index = 0; 1065 else 1066 phba->eh_sgl_alloc_index++; 1067 } else 1068 psgl_handle = NULL; 1069 spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); 1070 return psgl_handle; 1071 } 1072 1073 void 1074 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1075 { 1076 unsigned long flags; 1077 1078 spin_lock_irqsave(&phba->mgmt_sgl_lock, flags); 1079 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1080 "BM_%d : In free_mgmt_sgl_handle," 1081 "eh_sgl_free_index=%d\n", 1082 phba->eh_sgl_free_index); 1083 1084 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 1085 /* 1086 * this can happen if clean_task is called on a task that 1087 * failed in xmit_task or alloc_pdu. 1088 */ 1089 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1090 "BM_%d : Double Free in eh SGL ," 1091 "eh_sgl_free_index=%d\n", 1092 phba->eh_sgl_free_index); 1093 spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); 1094 return; 1095 } 1096 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; 1097 phba->eh_sgl_hndl_avbl++; 1098 if (phba->eh_sgl_free_index == 1099 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1)) 1100 phba->eh_sgl_free_index = 0; 1101 else 1102 phba->eh_sgl_free_index++; 1103 spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags); 1104 } 1105 1106 static void 1107 be_complete_io(struct beiscsi_conn *beiscsi_conn, 1108 struct iscsi_task *task, 1109 struct common_sol_cqe *csol_cqe) 1110 { 1111 struct beiscsi_io_task *io_task = task->dd_data; 1112 struct be_status_bhs *sts_bhs = 1113 (struct be_status_bhs *)io_task->cmd_bhs; 1114 struct iscsi_conn *conn = beiscsi_conn->conn; 1115 unsigned char *sense; 1116 u32 resid = 0, exp_cmdsn, max_cmdsn; 1117 u8 rsp, status, flags; 1118 1119 exp_cmdsn = csol_cqe->exp_cmdsn; 1120 max_cmdsn = (csol_cqe->exp_cmdsn + 1121 csol_cqe->cmd_wnd - 1); 1122 rsp = csol_cqe->i_resp; 1123 status = csol_cqe->i_sts; 1124 flags = csol_cqe->i_flags; 1125 resid = csol_cqe->res_cnt; 1126 1127 if (!task->sc) { 1128 if (io_task->scsi_cmnd) { 1129 scsi_dma_unmap(io_task->scsi_cmnd); 1130 io_task->scsi_cmnd = NULL; 1131 } 1132 1133 return; 1134 } 1135 task->sc->result = (DID_OK << 16) | status; 1136 if (rsp != ISCSI_STATUS_CMD_COMPLETED) { 1137 task->sc->result = DID_ERROR << 16; 1138 goto unmap; 1139 } 1140 1141 /* bidi not initially supported */ 1142 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) { 1143 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW)) 1144 task->sc->result = DID_ERROR << 16; 1145 1146 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) { 1147 scsi_set_resid(task->sc, resid); 1148 if (!status && (scsi_bufflen(task->sc) - resid < 1149 task->sc->underflow)) 1150 task->sc->result = DID_ERROR << 16; 1151 } 1152 } 1153 1154 if (status == SAM_STAT_CHECK_CONDITION) { 1155 u16 sense_len; 1156 unsigned short *slen = (unsigned short *)sts_bhs->sense_info; 1157 1158 sense = sts_bhs->sense_info + sizeof(unsigned short); 1159 sense_len = be16_to_cpu(*slen); 1160 memcpy(task->sc->sense_buffer, sense, 1161 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); 1162 } 1163 1164 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) 1165 conn->rxdata_octets += resid; 1166 unmap: 1167 if (io_task->scsi_cmnd) { 1168 scsi_dma_unmap(io_task->scsi_cmnd); 1169 io_task->scsi_cmnd = NULL; 1170 } 1171 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); 1172 } 1173 1174 static void 1175 be_complete_logout(struct beiscsi_conn *beiscsi_conn, 1176 struct iscsi_task *task, 1177 struct common_sol_cqe *csol_cqe) 1178 { 1179 struct iscsi_logout_rsp *hdr; 1180 struct beiscsi_io_task *io_task = task->dd_data; 1181 struct iscsi_conn *conn = beiscsi_conn->conn; 1182 1183 hdr = (struct iscsi_logout_rsp *)task->hdr; 1184 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 1185 hdr->t2wait = 5; 1186 hdr->t2retain = 0; 1187 hdr->flags = csol_cqe->i_flags; 1188 hdr->response = csol_cqe->i_resp; 1189 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1190 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1191 csol_cqe->cmd_wnd - 1); 1192 1193 hdr->dlength[0] = 0; 1194 hdr->dlength[1] = 0; 1195 hdr->dlength[2] = 0; 1196 hdr->hlength = 0; 1197 hdr->itt = io_task->libiscsi_itt; 1198 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1199 } 1200 1201 static void 1202 be_complete_tmf(struct beiscsi_conn *beiscsi_conn, 1203 struct iscsi_task *task, 1204 struct common_sol_cqe *csol_cqe) 1205 { 1206 struct iscsi_tm_rsp *hdr; 1207 struct iscsi_conn *conn = beiscsi_conn->conn; 1208 struct beiscsi_io_task *io_task = task->dd_data; 1209 1210 hdr = (struct iscsi_tm_rsp *)task->hdr; 1211 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 1212 hdr->flags = csol_cqe->i_flags; 1213 hdr->response = csol_cqe->i_resp; 1214 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1215 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1216 csol_cqe->cmd_wnd - 1); 1217 1218 hdr->itt = io_task->libiscsi_itt; 1219 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1220 } 1221 1222 static void 1223 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, 1224 struct beiscsi_hba *phba, struct sol_cqe *psol) 1225 { 1226 struct hwi_wrb_context *pwrb_context; 1227 uint16_t wrb_index, cid, cri_index; 1228 struct hwi_controller *phwi_ctrlr; 1229 struct wrb_handle *pwrb_handle; 1230 struct iscsi_session *session; 1231 struct iscsi_task *task; 1232 1233 phwi_ctrlr = phba->phwi_ctrlr; 1234 if (is_chip_be2_be3r(phba)) { 1235 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1236 wrb_idx, psol); 1237 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1238 cid, psol); 1239 } else { 1240 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1241 wrb_idx, psol); 1242 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1243 cid, psol); 1244 } 1245 1246 cri_index = BE_GET_CRI_FROM_CID(cid); 1247 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1248 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1249 session = beiscsi_conn->conn->session; 1250 spin_lock_bh(&session->back_lock); 1251 task = pwrb_handle->pio_handle; 1252 if (task) 1253 __iscsi_put_task(task); 1254 spin_unlock_bh(&session->back_lock); 1255 } 1256 1257 static void 1258 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, 1259 struct iscsi_task *task, 1260 struct common_sol_cqe *csol_cqe) 1261 { 1262 struct iscsi_nopin *hdr; 1263 struct iscsi_conn *conn = beiscsi_conn->conn; 1264 struct beiscsi_io_task *io_task = task->dd_data; 1265 1266 hdr = (struct iscsi_nopin *)task->hdr; 1267 hdr->flags = csol_cqe->i_flags; 1268 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1269 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1270 csol_cqe->cmd_wnd - 1); 1271 1272 hdr->opcode = ISCSI_OP_NOOP_IN; 1273 hdr->itt = io_task->libiscsi_itt; 1274 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1275 } 1276 1277 static void adapter_get_sol_cqe(struct beiscsi_hba *phba, 1278 struct sol_cqe *psol, 1279 struct common_sol_cqe *csol_cqe) 1280 { 1281 if (is_chip_be2_be3r(phba)) { 1282 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, 1283 i_exp_cmd_sn, psol); 1284 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, 1285 i_res_cnt, psol); 1286 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, 1287 i_cmd_wnd, psol); 1288 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, 1289 wrb_index, psol); 1290 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, 1291 cid, psol); 1292 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1293 hw_sts, psol); 1294 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, 1295 i_resp, psol); 1296 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1297 i_sts, psol); 1298 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, 1299 i_flags, psol); 1300 } else { 1301 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1302 i_exp_cmd_sn, psol); 1303 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1304 i_res_cnt, psol); 1305 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1306 wrb_index, psol); 1307 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1308 cid, psol); 1309 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1310 hw_sts, psol); 1311 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1312 i_cmd_wnd, psol); 1313 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1314 cmd_cmpl, psol)) 1315 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1316 i_sts, psol); 1317 else 1318 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1319 i_sts, psol); 1320 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1321 u, psol)) 1322 csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW; 1323 1324 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1325 o, psol)) 1326 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; 1327 } 1328 } 1329 1330 1331 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, 1332 struct beiscsi_hba *phba, struct sol_cqe *psol) 1333 { 1334 struct iscsi_conn *conn = beiscsi_conn->conn; 1335 struct iscsi_session *session = conn->session; 1336 struct common_sol_cqe csol_cqe = {0}; 1337 struct hwi_wrb_context *pwrb_context; 1338 struct hwi_controller *phwi_ctrlr; 1339 struct wrb_handle *pwrb_handle; 1340 struct iscsi_task *task; 1341 uint16_t cri_index = 0; 1342 uint8_t type; 1343 1344 phwi_ctrlr = phba->phwi_ctrlr; 1345 1346 /* Copy the elements to a common structure */ 1347 adapter_get_sol_cqe(phba, psol, &csol_cqe); 1348 1349 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); 1350 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1351 1352 pwrb_handle = pwrb_context->pwrb_handle_basestd[ 1353 csol_cqe.wrb_index]; 1354 1355 spin_lock_bh(&session->back_lock); 1356 task = pwrb_handle->pio_handle; 1357 if (!task) { 1358 spin_unlock_bh(&session->back_lock); 1359 return; 1360 } 1361 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; 1362 1363 switch (type) { 1364 case HWH_TYPE_IO: 1365 case HWH_TYPE_IO_RD: 1366 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 1367 ISCSI_OP_NOOP_OUT) 1368 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1369 else 1370 be_complete_io(beiscsi_conn, task, &csol_cqe); 1371 break; 1372 1373 case HWH_TYPE_LOGOUT: 1374 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 1375 be_complete_logout(beiscsi_conn, task, &csol_cqe); 1376 else 1377 be_complete_tmf(beiscsi_conn, task, &csol_cqe); 1378 break; 1379 1380 case HWH_TYPE_LOGIN: 1381 beiscsi_log(phba, KERN_ERR, 1382 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1383 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in" 1384 " hwi_complete_cmd- Solicited path\n"); 1385 break; 1386 1387 case HWH_TYPE_NOP: 1388 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1389 break; 1390 1391 default: 1392 beiscsi_log(phba, KERN_WARNING, 1393 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1394 "BM_%d : In hwi_complete_cmd, unknown type = %d" 1395 "wrb_index 0x%x CID 0x%x\n", type, 1396 csol_cqe.wrb_index, 1397 csol_cqe.cid); 1398 break; 1399 } 1400 1401 spin_unlock_bh(&session->back_lock); 1402 } 1403 1404 /** 1405 * ASYNC PDUs include 1406 * a. Unsolicited NOP-In (target initiated NOP-In) 1407 * b. ASYNC Messages 1408 * c. Reject PDU 1409 * d. Login response 1410 * These headers arrive unprocessed by the EP firmware. 1411 * iSCSI layer processes them. 1412 */ 1413 static unsigned int 1414 beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn, 1415 struct pdu_base *phdr, void *pdata, unsigned int dlen) 1416 { 1417 struct beiscsi_hba *phba = beiscsi_conn->phba; 1418 struct iscsi_conn *conn = beiscsi_conn->conn; 1419 struct beiscsi_io_task *io_task; 1420 struct iscsi_hdr *login_hdr; 1421 struct iscsi_task *task; 1422 u8 code; 1423 1424 code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr); 1425 switch (code) { 1426 case ISCSI_OP_NOOP_IN: 1427 pdata = NULL; 1428 dlen = 0; 1429 break; 1430 case ISCSI_OP_ASYNC_EVENT: 1431 break; 1432 case ISCSI_OP_REJECT: 1433 WARN_ON(!pdata); 1434 WARN_ON(!(dlen == 48)); 1435 beiscsi_log(phba, KERN_ERR, 1436 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1437 "BM_%d : In ISCSI_OP_REJECT\n"); 1438 break; 1439 case ISCSI_OP_LOGIN_RSP: 1440 case ISCSI_OP_TEXT_RSP: 1441 task = conn->login_task; 1442 io_task = task->dd_data; 1443 login_hdr = (struct iscsi_hdr *)phdr; 1444 login_hdr->itt = io_task->libiscsi_itt; 1445 break; 1446 default: 1447 beiscsi_log(phba, KERN_WARNING, 1448 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1449 "BM_%d : unrecognized async PDU opcode 0x%x\n", 1450 code); 1451 return 1; 1452 } 1453 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen); 1454 return 0; 1455 } 1456 1457 static inline void 1458 beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx, 1459 struct hd_async_handle *pasync_handle) 1460 { 1461 pasync_handle->is_final = 0; 1462 pasync_handle->buffer_len = 0; 1463 pasync_handle->in_use = 0; 1464 list_del_init(&pasync_handle->link); 1465 } 1466 1467 static void 1468 beiscsi_hdl_purge_handles(struct beiscsi_hba *phba, 1469 struct hd_async_context *pasync_ctx, 1470 u16 cri) 1471 { 1472 struct hd_async_handle *pasync_handle, *tmp_handle; 1473 struct list_head *plist; 1474 1475 plist = &pasync_ctx->async_entry[cri].wq.list; 1476 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) 1477 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); 1478 1479 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list); 1480 pasync_ctx->async_entry[cri].wq.hdr_len = 0; 1481 pasync_ctx->async_entry[cri].wq.bytes_received = 0; 1482 pasync_ctx->async_entry[cri].wq.bytes_needed = 0; 1483 } 1484 1485 static struct hd_async_handle * 1486 beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn, 1487 struct hd_async_context *pasync_ctx, 1488 struct i_t_dpdu_cqe *pdpdu_cqe, 1489 u8 *header) 1490 { 1491 struct beiscsi_hba *phba = beiscsi_conn->phba; 1492 struct hd_async_handle *pasync_handle; 1493 struct be_bus_address phys_addr; 1494 u16 cid, code, ci, cri; 1495 u8 final, error = 0; 1496 u32 dpl; 1497 1498 cid = beiscsi_conn->beiscsi_conn_cid; 1499 cri = BE_GET_ASYNC_CRI_FROM_CID(cid); 1500 /** 1501 * This function is invoked to get the right async_handle structure 1502 * from a given DEF PDU CQ entry. 1503 * 1504 * - index in CQ entry gives the vertical index 1505 * - address in CQ entry is the offset where the DMA last ended 1506 * - final - no more notifications for this PDU 1507 */ 1508 if (is_chip_be2_be3r(phba)) { 1509 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1510 dpl, pdpdu_cqe); 1511 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1512 index, pdpdu_cqe); 1513 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1514 final, pdpdu_cqe); 1515 } else { 1516 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1517 dpl, pdpdu_cqe); 1518 ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1519 index, pdpdu_cqe); 1520 final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1521 final, pdpdu_cqe); 1522 } 1523 1524 /** 1525 * DB addr Hi/Lo is same for BE and SKH. 1526 * Subtract the dataplacementlength to get to the base. 1527 */ 1528 phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1529 db_addr_lo, pdpdu_cqe); 1530 phys_addr.u.a32.address_lo -= dpl; 1531 phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1532 db_addr_hi, pdpdu_cqe); 1533 1534 code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe); 1535 switch (code) { 1536 case UNSOL_HDR_NOTIFY: 1537 pasync_handle = pasync_ctx->async_entry[ci].header; 1538 *header = 1; 1539 break; 1540 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1541 error = 1; 1542 case UNSOL_DATA_NOTIFY: 1543 pasync_handle = pasync_ctx->async_entry[ci].data; 1544 break; 1545 /* called only for above codes */ 1546 default: 1547 return NULL; 1548 } 1549 1550 if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address || 1551 pasync_handle->index != ci) { 1552 /* driver bug - if ci does not match async handle index */ 1553 error = 1; 1554 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1555 "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n", 1556 cid, pasync_handle->is_header ? 'H' : 'D', 1557 pasync_handle->pa.u.a64.address, 1558 pasync_handle->index, 1559 phys_addr.u.a64.address, ci); 1560 /* FW has stale address - attempt continuing by dropping */ 1561 } 1562 1563 /** 1564 * DEF PDU header and data buffers with errors should be simply 1565 * dropped as there are no consumers for it. 1566 */ 1567 if (error) { 1568 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); 1569 return NULL; 1570 } 1571 1572 if (pasync_handle->in_use || !list_empty(&pasync_handle->link)) { 1573 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1574 "BM_%d : cid %d async PDU handle in use - code %d ci %d addr %llx\n", 1575 cid, code, ci, phys_addr.u.a64.address); 1576 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); 1577 } 1578 1579 list_del_init(&pasync_handle->link); 1580 /** 1581 * Each CID is associated with unique CRI. 1582 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totaly different. 1583 **/ 1584 pasync_handle->cri = cri; 1585 pasync_handle->is_final = final; 1586 pasync_handle->buffer_len = dpl; 1587 pasync_handle->in_use = 1; 1588 1589 return pasync_handle; 1590 } 1591 1592 static unsigned int 1593 beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn, 1594 struct hd_async_context *pasync_ctx, 1595 u16 cri) 1596 { 1597 struct iscsi_session *session = beiscsi_conn->conn->session; 1598 struct hd_async_handle *pasync_handle, *plast_handle; 1599 struct beiscsi_hba *phba = beiscsi_conn->phba; 1600 void *phdr = NULL, *pdata = NULL; 1601 u32 dlen = 0, status = 0; 1602 struct list_head *plist; 1603 1604 plist = &pasync_ctx->async_entry[cri].wq.list; 1605 plast_handle = NULL; 1606 list_for_each_entry(pasync_handle, plist, link) { 1607 plast_handle = pasync_handle; 1608 /* get the header, the first entry */ 1609 if (!phdr) { 1610 phdr = pasync_handle->pbuffer; 1611 continue; 1612 } 1613 /* use first buffer to collect all the data */ 1614 if (!pdata) { 1615 pdata = pasync_handle->pbuffer; 1616 dlen = pasync_handle->buffer_len; 1617 continue; 1618 } 1619 if (!pasync_handle->buffer_len || 1620 (dlen + pasync_handle->buffer_len) > 1621 pasync_ctx->async_data.buffer_size) 1622 break; 1623 memcpy(pdata + dlen, pasync_handle->pbuffer, 1624 pasync_handle->buffer_len); 1625 dlen += pasync_handle->buffer_len; 1626 } 1627 1628 if (!plast_handle->is_final) { 1629 /* last handle should have final PDU notification from FW */ 1630 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1631 "BM_%d : cid %u %p fwd async PDU opcode %x with last handle missing - HL%u:DN%u:DR%u\n", 1632 beiscsi_conn->beiscsi_conn_cid, plast_handle, 1633 AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr), 1634 pasync_ctx->async_entry[cri].wq.hdr_len, 1635 pasync_ctx->async_entry[cri].wq.bytes_needed, 1636 pasync_ctx->async_entry[cri].wq.bytes_received); 1637 } 1638 spin_lock_bh(&session->back_lock); 1639 status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen); 1640 spin_unlock_bh(&session->back_lock); 1641 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); 1642 return status; 1643 } 1644 1645 static unsigned int 1646 beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn, 1647 struct hd_async_context *pasync_ctx, 1648 struct hd_async_handle *pasync_handle) 1649 { 1650 unsigned int bytes_needed = 0, status = 0; 1651 u16 cri = pasync_handle->cri; 1652 struct cri_wait_queue *wq; 1653 struct beiscsi_hba *phba; 1654 struct pdu_base *ppdu; 1655 char *err = ""; 1656 1657 phba = beiscsi_conn->phba; 1658 wq = &pasync_ctx->async_entry[cri].wq; 1659 if (pasync_handle->is_header) { 1660 /* check if PDU hdr is rcv'd when old hdr not completed */ 1661 if (wq->hdr_len) { 1662 err = "incomplete"; 1663 goto drop_pdu; 1664 } 1665 ppdu = pasync_handle->pbuffer; 1666 bytes_needed = AMAP_GET_BITS(struct amap_pdu_base, 1667 data_len_hi, ppdu); 1668 bytes_needed <<= 16; 1669 bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base, 1670 data_len_lo, ppdu)); 1671 wq->hdr_len = pasync_handle->buffer_len; 1672 wq->bytes_received = 0; 1673 wq->bytes_needed = bytes_needed; 1674 list_add_tail(&pasync_handle->link, &wq->list); 1675 if (!bytes_needed) 1676 status = beiscsi_hdl_fwd_pdu(beiscsi_conn, 1677 pasync_ctx, cri); 1678 } else { 1679 /* check if data received has header and is needed */ 1680 if (!wq->hdr_len || !wq->bytes_needed) { 1681 err = "header less"; 1682 goto drop_pdu; 1683 } 1684 wq->bytes_received += pasync_handle->buffer_len; 1685 /* Something got overwritten? Better catch it here. */ 1686 if (wq->bytes_received > wq->bytes_needed) { 1687 err = "overflow"; 1688 goto drop_pdu; 1689 } 1690 list_add_tail(&pasync_handle->link, &wq->list); 1691 if (wq->bytes_received == wq->bytes_needed) 1692 status = beiscsi_hdl_fwd_pdu(beiscsi_conn, 1693 pasync_ctx, cri); 1694 } 1695 return status; 1696 1697 drop_pdu: 1698 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI, 1699 "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n", 1700 beiscsi_conn->beiscsi_conn_cid, err, 1701 pasync_handle->is_header ? 'H' : 'D', 1702 wq->hdr_len, wq->bytes_needed, 1703 pasync_handle->buffer_len); 1704 /* discard this handle */ 1705 beiscsi_hdl_put_handle(pasync_ctx, pasync_handle); 1706 /* free all the other handles in cri_wait_queue */ 1707 beiscsi_hdl_purge_handles(phba, pasync_ctx, cri); 1708 /* try continuing */ 1709 return status; 1710 } 1711 1712 static void 1713 beiscsi_hdq_post_handles(struct beiscsi_hba *phba, 1714 u8 header, u8 ulp_num, u16 nbuf) 1715 { 1716 struct hd_async_handle *pasync_handle; 1717 struct hd_async_context *pasync_ctx; 1718 struct hwi_controller *phwi_ctrlr; 1719 struct phys_addr *pasync_sge; 1720 u32 ring_id, doorbell = 0; 1721 u32 doorbell_offset; 1722 u16 prod, pi; 1723 1724 phwi_ctrlr = phba->phwi_ctrlr; 1725 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1726 if (header) { 1727 pasync_sge = pasync_ctx->async_header.ring_base; 1728 pi = pasync_ctx->async_header.pi; 1729 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; 1730 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. 1731 doorbell_offset; 1732 } else { 1733 pasync_sge = pasync_ctx->async_data.ring_base; 1734 pi = pasync_ctx->async_data.pi; 1735 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; 1736 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. 1737 doorbell_offset; 1738 } 1739 1740 for (prod = 0; prod < nbuf; prod++) { 1741 if (header) 1742 pasync_handle = pasync_ctx->async_entry[pi].header; 1743 else 1744 pasync_handle = pasync_ctx->async_entry[pi].data; 1745 WARN_ON(pasync_handle->is_header != header); 1746 WARN_ON(pasync_handle->index != pi); 1747 /* setup the ring only once */ 1748 if (nbuf == pasync_ctx->num_entries) { 1749 /* note hi is lo */ 1750 pasync_sge[pi].hi = pasync_handle->pa.u.a32.address_lo; 1751 pasync_sge[pi].lo = pasync_handle->pa.u.a32.address_hi; 1752 } 1753 if (++pi == pasync_ctx->num_entries) 1754 pi = 0; 1755 } 1756 1757 if (header) 1758 pasync_ctx->async_header.pi = pi; 1759 else 1760 pasync_ctx->async_data.pi = pi; 1761 1762 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK; 1763 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT; 1764 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT; 1765 doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT; 1766 iowrite32(doorbell, phba->db_va + doorbell_offset); 1767 } 1768 1769 static void 1770 beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn, 1771 struct i_t_dpdu_cqe *pdpdu_cqe) 1772 { 1773 struct beiscsi_hba *phba = beiscsi_conn->phba; 1774 struct hd_async_handle *pasync_handle = NULL; 1775 struct hd_async_context *pasync_ctx; 1776 struct hwi_controller *phwi_ctrlr; 1777 u8 ulp_num, consumed, header = 0; 1778 u16 cid_cri; 1779 1780 phwi_ctrlr = phba->phwi_ctrlr; 1781 cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); 1782 ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri); 1783 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1784 pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx, 1785 pdpdu_cqe, &header); 1786 if (is_chip_be2_be3r(phba)) 1787 consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1788 num_cons, pdpdu_cqe); 1789 else 1790 consumed = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1791 num_cons, pdpdu_cqe); 1792 if (pasync_handle) 1793 beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle); 1794 /* num_cons indicates number of 8 RQEs consumed */ 1795 if (consumed) 1796 beiscsi_hdq_post_handles(phba, header, ulp_num, 8 * consumed); 1797 } 1798 1799 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba) 1800 { 1801 struct be_queue_info *mcc_cq; 1802 struct be_mcc_compl *mcc_compl; 1803 unsigned int num_processed = 0; 1804 1805 mcc_cq = &phba->ctrl.mcc_obj.cq; 1806 mcc_compl = queue_tail_node(mcc_cq); 1807 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1808 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { 1809 if (beiscsi_hba_in_error(phba)) 1810 return; 1811 1812 if (num_processed >= 32) { 1813 hwi_ring_cq_db(phba, mcc_cq->id, 1814 num_processed, 0); 1815 num_processed = 0; 1816 } 1817 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { 1818 beiscsi_process_async_event(phba, mcc_compl); 1819 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 1820 beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl); 1821 } 1822 1823 mcc_compl->flags = 0; 1824 queue_tail_inc(mcc_cq); 1825 mcc_compl = queue_tail_node(mcc_cq); 1826 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1827 num_processed++; 1828 } 1829 1830 if (num_processed > 0) 1831 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1); 1832 } 1833 1834 static void beiscsi_mcc_work(struct work_struct *work) 1835 { 1836 struct be_eq_obj *pbe_eq; 1837 struct beiscsi_hba *phba; 1838 1839 pbe_eq = container_of(work, struct be_eq_obj, mcc_work); 1840 phba = pbe_eq->phba; 1841 beiscsi_process_mcc_cq(phba); 1842 /* rearm EQ for further interrupts */ 1843 if (!beiscsi_hba_in_error(phba)) 1844 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 1845 } 1846 1847 /** 1848 * beiscsi_process_cq()- Process the Completion Queue 1849 * @pbe_eq: Event Q on which the Completion has come 1850 * @budget: Max number of events to processed 1851 * 1852 * return 1853 * Number of Completion Entries processed. 1854 **/ 1855 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget) 1856 { 1857 struct be_queue_info *cq; 1858 struct sol_cqe *sol; 1859 unsigned int total = 0; 1860 unsigned int num_processed = 0; 1861 unsigned short code = 0, cid = 0; 1862 uint16_t cri_index = 0; 1863 struct beiscsi_conn *beiscsi_conn; 1864 struct beiscsi_endpoint *beiscsi_ep; 1865 struct iscsi_endpoint *ep; 1866 struct beiscsi_hba *phba; 1867 1868 cq = pbe_eq->cq; 1869 sol = queue_tail_node(cq); 1870 phba = pbe_eq->phba; 1871 1872 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 1873 CQE_VALID_MASK) { 1874 if (beiscsi_hba_in_error(phba)) 1875 return 0; 1876 1877 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 1878 1879 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 32] & 1880 CQE_CODE_MASK); 1881 1882 /* Get the CID */ 1883 if (is_chip_be2_be3r(phba)) { 1884 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); 1885 } else { 1886 if ((code == DRIVERMSG_NOTIFY) || 1887 (code == UNSOL_HDR_NOTIFY) || 1888 (code == UNSOL_DATA_NOTIFY)) 1889 cid = AMAP_GET_BITS( 1890 struct amap_i_t_dpdu_cqe_v2, 1891 cid, sol); 1892 else 1893 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1894 cid, sol); 1895 } 1896 1897 cri_index = BE_GET_CRI_FROM_CID(cid); 1898 ep = phba->ep_array[cri_index]; 1899 1900 if (ep == NULL) { 1901 /* connection has already been freed 1902 * just move on to next one 1903 */ 1904 beiscsi_log(phba, KERN_WARNING, 1905 BEISCSI_LOG_INIT, 1906 "BM_%d : proc cqe of disconn ep: cid %d\n", 1907 cid); 1908 goto proc_next_cqe; 1909 } 1910 1911 beiscsi_ep = ep->dd_data; 1912 beiscsi_conn = beiscsi_ep->conn; 1913 1914 /* replenish cq */ 1915 if (num_processed == 32) { 1916 hwi_ring_cq_db(phba, cq->id, 32, 0); 1917 num_processed = 0; 1918 } 1919 total++; 1920 1921 switch (code) { 1922 case SOL_CMD_COMPLETE: 1923 hwi_complete_cmd(beiscsi_conn, phba, sol); 1924 break; 1925 case DRIVERMSG_NOTIFY: 1926 beiscsi_log(phba, KERN_INFO, 1927 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1928 "BM_%d : Received %s[%d] on CID : %d\n", 1929 cqe_desc[code], code, cid); 1930 1931 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 1932 break; 1933 case UNSOL_HDR_NOTIFY: 1934 beiscsi_log(phba, KERN_INFO, 1935 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1936 "BM_%d : Received %s[%d] on CID : %d\n", 1937 cqe_desc[code], code, cid); 1938 1939 spin_lock_bh(&phba->async_pdu_lock); 1940 beiscsi_hdq_process_compl(beiscsi_conn, 1941 (struct i_t_dpdu_cqe *)sol); 1942 spin_unlock_bh(&phba->async_pdu_lock); 1943 break; 1944 case UNSOL_DATA_NOTIFY: 1945 beiscsi_log(phba, KERN_INFO, 1946 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1947 "BM_%d : Received %s[%d] on CID : %d\n", 1948 cqe_desc[code], code, cid); 1949 1950 spin_lock_bh(&phba->async_pdu_lock); 1951 beiscsi_hdq_process_compl(beiscsi_conn, 1952 (struct i_t_dpdu_cqe *)sol); 1953 spin_unlock_bh(&phba->async_pdu_lock); 1954 break; 1955 case CXN_INVALIDATE_INDEX_NOTIFY: 1956 case CMD_INVALIDATED_NOTIFY: 1957 case CXN_INVALIDATE_NOTIFY: 1958 beiscsi_log(phba, KERN_ERR, 1959 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1960 "BM_%d : Ignoring %s[%d] on CID : %d\n", 1961 cqe_desc[code], code, cid); 1962 break; 1963 case CXN_KILLED_HDR_DIGEST_ERR: 1964 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 1965 beiscsi_log(phba, KERN_ERR, 1966 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1967 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1968 cqe_desc[code], code, cid); 1969 break; 1970 case CMD_KILLED_INVALID_STATSN_RCVD: 1971 case CMD_KILLED_INVALID_R2T_RCVD: 1972 case CMD_CXN_KILLED_LUN_INVALID: 1973 case CMD_CXN_KILLED_ICD_INVALID: 1974 case CMD_CXN_KILLED_ITT_INVALID: 1975 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 1976 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 1977 beiscsi_log(phba, KERN_ERR, 1978 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1979 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 1980 cqe_desc[code], code, cid); 1981 break; 1982 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 1983 beiscsi_log(phba, KERN_ERR, 1984 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1985 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 1986 cqe_desc[code], code, cid); 1987 spin_lock_bh(&phba->async_pdu_lock); 1988 /* driver consumes the entry and drops the contents */ 1989 beiscsi_hdq_process_compl(beiscsi_conn, 1990 (struct i_t_dpdu_cqe *)sol); 1991 spin_unlock_bh(&phba->async_pdu_lock); 1992 break; 1993 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 1994 case CXN_KILLED_BURST_LEN_MISMATCH: 1995 case CXN_KILLED_AHS_RCVD: 1996 case CXN_KILLED_UNKNOWN_HDR: 1997 case CXN_KILLED_STALE_ITT_TTT_RCVD: 1998 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 1999 case CXN_KILLED_TIMED_OUT: 2000 case CXN_KILLED_FIN_RCVD: 2001 case CXN_KILLED_RST_SENT: 2002 case CXN_KILLED_RST_RCVD: 2003 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2004 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2005 case CXN_KILLED_OVER_RUN_RESIDUAL: 2006 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2007 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2008 beiscsi_log(phba, KERN_ERR, 2009 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2010 "BM_%d : Event %s[%d] received on CID : %d\n", 2011 cqe_desc[code], code, cid); 2012 if (beiscsi_conn) 2013 iscsi_conn_failure(beiscsi_conn->conn, 2014 ISCSI_ERR_CONN_FAILED); 2015 break; 2016 default: 2017 beiscsi_log(phba, KERN_ERR, 2018 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2019 "BM_%d : Invalid CQE Event Received Code : %d" 2020 "CID 0x%x...\n", 2021 code, cid); 2022 break; 2023 } 2024 2025 proc_next_cqe: 2026 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2027 queue_tail_inc(cq); 2028 sol = queue_tail_node(cq); 2029 num_processed++; 2030 if (total == budget) 2031 break; 2032 } 2033 2034 hwi_ring_cq_db(phba, cq->id, num_processed, 1); 2035 return total; 2036 } 2037 2038 static int be_iopoll(struct irq_poll *iop, int budget) 2039 { 2040 unsigned int ret, io_events; 2041 struct beiscsi_hba *phba; 2042 struct be_eq_obj *pbe_eq; 2043 struct be_eq_entry *eqe = NULL; 2044 struct be_queue_info *eq; 2045 2046 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2047 phba = pbe_eq->phba; 2048 if (beiscsi_hba_in_error(phba)) { 2049 irq_poll_complete(iop); 2050 return 0; 2051 } 2052 2053 io_events = 0; 2054 eq = &pbe_eq->q; 2055 eqe = queue_tail_node(eq); 2056 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] & 2057 EQE_VALID_MASK) { 2058 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 2059 queue_tail_inc(eq); 2060 eqe = queue_tail_node(eq); 2061 io_events++; 2062 } 2063 hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1); 2064 2065 ret = beiscsi_process_cq(pbe_eq, budget); 2066 pbe_eq->cq_count += ret; 2067 if (ret < budget) { 2068 irq_poll_complete(iop); 2069 beiscsi_log(phba, KERN_INFO, 2070 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2071 "BM_%d : rearm pbe_eq->q.id =%d ret %d\n", 2072 pbe_eq->q.id, ret); 2073 if (!beiscsi_hba_in_error(phba)) 2074 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2075 } 2076 return ret; 2077 } 2078 2079 static void 2080 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2081 unsigned int num_sg, struct beiscsi_io_task *io_task) 2082 { 2083 struct iscsi_sge *psgl; 2084 unsigned int sg_len, index; 2085 unsigned int sge_len = 0; 2086 unsigned long long addr; 2087 struct scatterlist *l_sg; 2088 unsigned int offset; 2089 2090 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2091 io_task->bhs_pa.u.a32.address_lo); 2092 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2093 io_task->bhs_pa.u.a32.address_hi); 2094 2095 l_sg = sg; 2096 for (index = 0; (index < num_sg) && (index < 2); index++, 2097 sg = sg_next(sg)) { 2098 if (index == 0) { 2099 sg_len = sg_dma_len(sg); 2100 addr = (u64) sg_dma_address(sg); 2101 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2102 sge0_addr_lo, pwrb, 2103 lower_32_bits(addr)); 2104 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2105 sge0_addr_hi, pwrb, 2106 upper_32_bits(addr)); 2107 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2108 sge0_len, pwrb, 2109 sg_len); 2110 sge_len = sg_len; 2111 } else { 2112 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2113 pwrb, sge_len); 2114 sg_len = sg_dma_len(sg); 2115 addr = (u64) sg_dma_address(sg); 2116 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2117 sge1_addr_lo, pwrb, 2118 lower_32_bits(addr)); 2119 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2120 sge1_addr_hi, pwrb, 2121 upper_32_bits(addr)); 2122 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2123 sge1_len, pwrb, 2124 sg_len); 2125 } 2126 } 2127 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2128 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2129 2130 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2131 2132 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2133 io_task->bhs_pa.u.a32.address_hi); 2134 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2135 io_task->bhs_pa.u.a32.address_lo); 2136 2137 if (num_sg == 1) { 2138 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2139 1); 2140 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2141 0); 2142 } else if (num_sg == 2) { 2143 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2144 0); 2145 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2146 1); 2147 } else { 2148 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2149 0); 2150 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2151 0); 2152 } 2153 2154 sg = l_sg; 2155 psgl++; 2156 psgl++; 2157 offset = 0; 2158 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2159 sg_len = sg_dma_len(sg); 2160 addr = (u64) sg_dma_address(sg); 2161 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2162 lower_32_bits(addr)); 2163 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2164 upper_32_bits(addr)); 2165 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2166 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2167 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2168 offset += sg_len; 2169 } 2170 psgl--; 2171 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2172 } 2173 2174 static void 2175 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2176 unsigned int num_sg, struct beiscsi_io_task *io_task) 2177 { 2178 struct iscsi_sge *psgl; 2179 unsigned int sg_len, index; 2180 unsigned int sge_len = 0; 2181 unsigned long long addr; 2182 struct scatterlist *l_sg; 2183 unsigned int offset; 2184 2185 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2186 io_task->bhs_pa.u.a32.address_lo); 2187 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2188 io_task->bhs_pa.u.a32.address_hi); 2189 2190 l_sg = sg; 2191 for (index = 0; (index < num_sg) && (index < 2); index++, 2192 sg = sg_next(sg)) { 2193 if (index == 0) { 2194 sg_len = sg_dma_len(sg); 2195 addr = (u64) sg_dma_address(sg); 2196 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2197 ((u32)(addr & 0xFFFFFFFF))); 2198 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2199 ((u32)(addr >> 32))); 2200 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2201 sg_len); 2202 sge_len = sg_len; 2203 } else { 2204 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2205 pwrb, sge_len); 2206 sg_len = sg_dma_len(sg); 2207 addr = (u64) sg_dma_address(sg); 2208 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2209 ((u32)(addr & 0xFFFFFFFF))); 2210 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2211 ((u32)(addr >> 32))); 2212 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2213 sg_len); 2214 } 2215 } 2216 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2217 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2218 2219 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2220 2221 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2222 io_task->bhs_pa.u.a32.address_hi); 2223 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2224 io_task->bhs_pa.u.a32.address_lo); 2225 2226 if (num_sg == 1) { 2227 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2228 1); 2229 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2230 0); 2231 } else if (num_sg == 2) { 2232 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2233 0); 2234 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2235 1); 2236 } else { 2237 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2238 0); 2239 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2240 0); 2241 } 2242 sg = l_sg; 2243 psgl++; 2244 psgl++; 2245 offset = 0; 2246 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2247 sg_len = sg_dma_len(sg); 2248 addr = (u64) sg_dma_address(sg); 2249 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2250 (addr & 0xFFFFFFFF)); 2251 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2252 (addr >> 32)); 2253 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2254 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2255 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2256 offset += sg_len; 2257 } 2258 psgl--; 2259 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2260 } 2261 2262 /** 2263 * hwi_write_buffer()- Populate the WRB with task info 2264 * @pwrb: ptr to the WRB entry 2265 * @task: iscsi task which is to be executed 2266 **/ 2267 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2268 { 2269 struct iscsi_sge *psgl; 2270 struct beiscsi_io_task *io_task = task->dd_data; 2271 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2272 struct beiscsi_hba *phba = beiscsi_conn->phba; 2273 uint8_t dsp_value = 0; 2274 2275 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2276 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2277 io_task->bhs_pa.u.a32.address_lo); 2278 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2279 io_task->bhs_pa.u.a32.address_hi); 2280 2281 if (task->data) { 2282 2283 /* Check for the data_count */ 2284 dsp_value = (task->data_count) ? 1 : 0; 2285 2286 if (is_chip_be2_be3r(phba)) 2287 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2288 pwrb, dsp_value); 2289 else 2290 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2291 pwrb, dsp_value); 2292 2293 /* Map addr only if there is data_count */ 2294 if (dsp_value) { 2295 io_task->mtask_addr = dma_map_single(&phba->pcidev->dev, 2296 task->data, 2297 task->data_count, 2298 DMA_TO_DEVICE); 2299 if (dma_mapping_error(&phba->pcidev->dev, 2300 io_task->mtask_addr)) 2301 return -ENOMEM; 2302 io_task->mtask_data_count = task->data_count; 2303 } else 2304 io_task->mtask_addr = 0; 2305 2306 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2307 lower_32_bits(io_task->mtask_addr)); 2308 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2309 upper_32_bits(io_task->mtask_addr)); 2310 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2311 task->data_count); 2312 2313 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2314 } else { 2315 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2316 io_task->mtask_addr = 0; 2317 } 2318 2319 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2320 2321 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2322 2323 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2324 io_task->bhs_pa.u.a32.address_hi); 2325 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2326 io_task->bhs_pa.u.a32.address_lo); 2327 if (task->data) { 2328 psgl++; 2329 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2330 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2331 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2332 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2333 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2334 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2335 2336 psgl++; 2337 if (task->data) { 2338 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2339 lower_32_bits(io_task->mtask_addr)); 2340 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2341 upper_32_bits(io_task->mtask_addr)); 2342 } 2343 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2344 } 2345 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2346 return 0; 2347 } 2348 2349 /** 2350 * beiscsi_find_mem_req()- Find mem needed 2351 * @phba: ptr to HBA struct 2352 **/ 2353 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2354 { 2355 uint8_t mem_descr_index, ulp_num; 2356 unsigned int num_async_pdu_buf_pages; 2357 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2358 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2359 2360 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2361 2362 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2363 BE_ISCSI_PDU_HEADER_SIZE; 2364 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2365 sizeof(struct hwi_context_memory); 2366 2367 2368 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2369 * (phba->params.wrbs_per_cxn) 2370 * phba->params.cxns_per_ctrl; 2371 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2372 (phba->params.wrbs_per_cxn); 2373 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2374 phba->params.cxns_per_ctrl); 2375 2376 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2377 phba->params.icds_per_ctrl; 2378 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2379 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2380 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2381 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2382 2383 num_async_pdu_buf_sgl_pages = 2384 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2385 phba, ulp_num) * 2386 sizeof(struct phys_addr)); 2387 2388 num_async_pdu_buf_pages = 2389 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2390 phba, ulp_num) * 2391 phba->params.defpdu_hdr_sz); 2392 2393 num_async_pdu_data_pages = 2394 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2395 phba, ulp_num) * 2396 phba->params.defpdu_data_sz); 2397 2398 num_async_pdu_data_sgl_pages = 2399 PAGES_REQUIRED(BEISCSI_ASYNC_HDQ_SIZE( 2400 phba, ulp_num) * 2401 sizeof(struct phys_addr)); 2402 2403 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2404 (ulp_num * MEM_DESCR_OFFSET)); 2405 phba->mem_req[mem_descr_index] = 2406 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2407 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2408 2409 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2410 (ulp_num * MEM_DESCR_OFFSET)); 2411 phba->mem_req[mem_descr_index] = 2412 num_async_pdu_buf_pages * 2413 PAGE_SIZE; 2414 2415 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2416 (ulp_num * MEM_DESCR_OFFSET)); 2417 phba->mem_req[mem_descr_index] = 2418 num_async_pdu_data_pages * 2419 PAGE_SIZE; 2420 2421 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2422 (ulp_num * MEM_DESCR_OFFSET)); 2423 phba->mem_req[mem_descr_index] = 2424 num_async_pdu_buf_sgl_pages * 2425 PAGE_SIZE; 2426 2427 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2428 (ulp_num * MEM_DESCR_OFFSET)); 2429 phba->mem_req[mem_descr_index] = 2430 num_async_pdu_data_sgl_pages * 2431 PAGE_SIZE; 2432 2433 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2434 (ulp_num * MEM_DESCR_OFFSET)); 2435 phba->mem_req[mem_descr_index] = 2436 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2437 sizeof(struct hd_async_handle); 2438 2439 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2440 (ulp_num * MEM_DESCR_OFFSET)); 2441 phba->mem_req[mem_descr_index] = 2442 BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2443 sizeof(struct hd_async_handle); 2444 2445 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2446 (ulp_num * MEM_DESCR_OFFSET)); 2447 phba->mem_req[mem_descr_index] = 2448 sizeof(struct hd_async_context) + 2449 (BEISCSI_ASYNC_HDQ_SIZE(phba, ulp_num) * 2450 sizeof(struct hd_async_entry)); 2451 } 2452 } 2453 } 2454 2455 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2456 { 2457 dma_addr_t bus_add; 2458 struct hwi_controller *phwi_ctrlr; 2459 struct be_mem_descriptor *mem_descr; 2460 struct mem_array *mem_arr, *mem_arr_orig; 2461 unsigned int i, j, alloc_size, curr_alloc_size; 2462 2463 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2464 if (!phba->phwi_ctrlr) 2465 return -ENOMEM; 2466 2467 /* Allocate memory for wrb_context */ 2468 phwi_ctrlr = phba->phwi_ctrlr; 2469 phwi_ctrlr->wrb_context = kcalloc(phba->params.cxns_per_ctrl, 2470 sizeof(struct hwi_wrb_context), 2471 GFP_KERNEL); 2472 if (!phwi_ctrlr->wrb_context) { 2473 kfree(phba->phwi_ctrlr); 2474 return -ENOMEM; 2475 } 2476 2477 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2478 GFP_KERNEL); 2479 if (!phba->init_mem) { 2480 kfree(phwi_ctrlr->wrb_context); 2481 kfree(phba->phwi_ctrlr); 2482 return -ENOMEM; 2483 } 2484 2485 mem_arr_orig = kmalloc_array(BEISCSI_MAX_FRAGS_INIT, 2486 sizeof(*mem_arr_orig), 2487 GFP_KERNEL); 2488 if (!mem_arr_orig) { 2489 kfree(phba->init_mem); 2490 kfree(phwi_ctrlr->wrb_context); 2491 kfree(phba->phwi_ctrlr); 2492 return -ENOMEM; 2493 } 2494 2495 mem_descr = phba->init_mem; 2496 for (i = 0; i < SE_MEM_MAX; i++) { 2497 if (!phba->mem_req[i]) { 2498 mem_descr->mem_array = NULL; 2499 mem_descr++; 2500 continue; 2501 } 2502 2503 j = 0; 2504 mem_arr = mem_arr_orig; 2505 alloc_size = phba->mem_req[i]; 2506 memset(mem_arr, 0, sizeof(struct mem_array) * 2507 BEISCSI_MAX_FRAGS_INIT); 2508 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2509 do { 2510 mem_arr->virtual_address = 2511 dma_alloc_coherent(&phba->pcidev->dev, 2512 curr_alloc_size, &bus_add, GFP_KERNEL); 2513 if (!mem_arr->virtual_address) { 2514 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2515 goto free_mem; 2516 if (curr_alloc_size - 2517 rounddown_pow_of_two(curr_alloc_size)) 2518 curr_alloc_size = rounddown_pow_of_two 2519 (curr_alloc_size); 2520 else 2521 curr_alloc_size = curr_alloc_size / 2; 2522 } else { 2523 mem_arr->bus_address.u. 2524 a64.address = (__u64) bus_add; 2525 mem_arr->size = curr_alloc_size; 2526 alloc_size -= curr_alloc_size; 2527 curr_alloc_size = min(be_max_phys_size * 2528 1024, alloc_size); 2529 j++; 2530 mem_arr++; 2531 } 2532 } while (alloc_size); 2533 mem_descr->num_elements = j; 2534 mem_descr->size_in_bytes = phba->mem_req[i]; 2535 mem_descr->mem_array = kmalloc_array(j, sizeof(*mem_arr), 2536 GFP_KERNEL); 2537 if (!mem_descr->mem_array) 2538 goto free_mem; 2539 2540 memcpy(mem_descr->mem_array, mem_arr_orig, 2541 sizeof(struct mem_array) * j); 2542 mem_descr++; 2543 } 2544 kfree(mem_arr_orig); 2545 return 0; 2546 free_mem: 2547 mem_descr->num_elements = j; 2548 while ((i) || (j)) { 2549 for (j = mem_descr->num_elements; j > 0; j--) { 2550 dma_free_coherent(&phba->pcidev->dev, 2551 mem_descr->mem_array[j - 1].size, 2552 mem_descr->mem_array[j - 1]. 2553 virtual_address, 2554 (unsigned long)mem_descr-> 2555 mem_array[j - 1]. 2556 bus_address.u.a64.address); 2557 } 2558 if (i) { 2559 i--; 2560 kfree(mem_descr->mem_array); 2561 mem_descr--; 2562 } 2563 } 2564 kfree(mem_arr_orig); 2565 kfree(phba->init_mem); 2566 kfree(phba->phwi_ctrlr->wrb_context); 2567 kfree(phba->phwi_ctrlr); 2568 return -ENOMEM; 2569 } 2570 2571 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2572 { 2573 beiscsi_find_mem_req(phba); 2574 return beiscsi_alloc_mem(phba); 2575 } 2576 2577 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2578 { 2579 struct pdu_data_out *pdata_out; 2580 struct pdu_nop_out *pnop_out; 2581 struct be_mem_descriptor *mem_descr; 2582 2583 mem_descr = phba->init_mem; 2584 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2585 pdata_out = 2586 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2587 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2588 2589 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2590 IIOC_SCSI_DATA); 2591 2592 pnop_out = 2593 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 2594 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2595 2596 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2597 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2598 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2599 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2600 } 2601 2602 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2603 { 2604 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2605 struct hwi_context_memory *phwi_ctxt; 2606 struct wrb_handle *pwrb_handle = NULL; 2607 struct hwi_controller *phwi_ctrlr; 2608 struct hwi_wrb_context *pwrb_context; 2609 struct iscsi_wrb *pwrb = NULL; 2610 unsigned int num_cxn_wrbh = 0; 2611 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2612 2613 mem_descr_wrbh = phba->init_mem; 2614 mem_descr_wrbh += HWI_MEM_WRBH; 2615 2616 mem_descr_wrb = phba->init_mem; 2617 mem_descr_wrb += HWI_MEM_WRB; 2618 phwi_ctrlr = phba->phwi_ctrlr; 2619 2620 /* Allocate memory for WRBQ */ 2621 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2622 phwi_ctxt->be_wrbq = kcalloc(phba->params.cxns_per_ctrl, 2623 sizeof(struct be_queue_info), 2624 GFP_KERNEL); 2625 if (!phwi_ctxt->be_wrbq) { 2626 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2627 "BM_%d : WRBQ Mem Alloc Failed\n"); 2628 return -ENOMEM; 2629 } 2630 2631 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2632 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2633 pwrb_context->pwrb_handle_base = 2634 kcalloc(phba->params.wrbs_per_cxn, 2635 sizeof(struct wrb_handle *), 2636 GFP_KERNEL); 2637 if (!pwrb_context->pwrb_handle_base) { 2638 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2639 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2640 goto init_wrb_hndl_failed; 2641 } 2642 pwrb_context->pwrb_handle_basestd = 2643 kcalloc(phba->params.wrbs_per_cxn, 2644 sizeof(struct wrb_handle *), 2645 GFP_KERNEL); 2646 if (!pwrb_context->pwrb_handle_basestd) { 2647 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2648 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2649 goto init_wrb_hndl_failed; 2650 } 2651 if (!num_cxn_wrbh) { 2652 pwrb_handle = 2653 mem_descr_wrbh->mem_array[idx].virtual_address; 2654 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2655 ((sizeof(struct wrb_handle)) * 2656 phba->params.wrbs_per_cxn)); 2657 idx++; 2658 } 2659 pwrb_context->alloc_index = 0; 2660 pwrb_context->wrb_handles_available = 0; 2661 pwrb_context->free_index = 0; 2662 2663 if (num_cxn_wrbh) { 2664 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2665 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2666 pwrb_context->pwrb_handle_basestd[j] = 2667 pwrb_handle; 2668 pwrb_context->wrb_handles_available++; 2669 pwrb_handle->wrb_index = j; 2670 pwrb_handle++; 2671 } 2672 num_cxn_wrbh--; 2673 } 2674 spin_lock_init(&pwrb_context->wrb_lock); 2675 } 2676 idx = 0; 2677 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2678 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2679 if (!num_cxn_wrb) { 2680 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2681 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2682 ((sizeof(struct iscsi_wrb) * 2683 phba->params.wrbs_per_cxn)); 2684 idx++; 2685 } 2686 2687 if (num_cxn_wrb) { 2688 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2689 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2690 pwrb_handle->pwrb = pwrb; 2691 pwrb++; 2692 } 2693 num_cxn_wrb--; 2694 } 2695 } 2696 return 0; 2697 init_wrb_hndl_failed: 2698 for (j = index; j > 0; j--) { 2699 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2700 kfree(pwrb_context->pwrb_handle_base); 2701 kfree(pwrb_context->pwrb_handle_basestd); 2702 } 2703 return -ENOMEM; 2704 } 2705 2706 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2707 { 2708 uint8_t ulp_num; 2709 struct hwi_controller *phwi_ctrlr; 2710 struct hba_parameters *p = &phba->params; 2711 struct hd_async_context *pasync_ctx; 2712 struct hd_async_handle *pasync_header_h, *pasync_data_h; 2713 unsigned int index, idx, num_per_mem, num_async_data; 2714 struct be_mem_descriptor *mem_descr; 2715 2716 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2717 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2718 /* get async_ctx for each ULP */ 2719 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2720 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2721 (ulp_num * MEM_DESCR_OFFSET)); 2722 2723 phwi_ctrlr = phba->phwi_ctrlr; 2724 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2725 (struct hd_async_context *) 2726 mem_descr->mem_array[0].virtual_address; 2727 2728 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2729 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2730 2731 pasync_ctx->async_entry = 2732 (struct hd_async_entry *) 2733 ((long unsigned int)pasync_ctx + 2734 sizeof(struct hd_async_context)); 2735 2736 pasync_ctx->num_entries = BEISCSI_ASYNC_HDQ_SIZE(phba, 2737 ulp_num); 2738 /* setup header buffers */ 2739 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2740 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2741 (ulp_num * MEM_DESCR_OFFSET); 2742 if (mem_descr->mem_array[0].virtual_address) { 2743 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2744 "BM_%d : hwi_init_async_pdu_ctx" 2745 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2746 ulp_num, 2747 mem_descr->mem_array[0]. 2748 virtual_address); 2749 } else 2750 beiscsi_log(phba, KERN_WARNING, 2751 BEISCSI_LOG_INIT, 2752 "BM_%d : No Virtual address for ULP : %d\n", 2753 ulp_num); 2754 2755 pasync_ctx->async_header.pi = 0; 2756 pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz; 2757 pasync_ctx->async_header.va_base = 2758 mem_descr->mem_array[0].virtual_address; 2759 2760 pasync_ctx->async_header.pa_base.u.a64.address = 2761 mem_descr->mem_array[0]. 2762 bus_address.u.a64.address; 2763 2764 /* setup header buffer sgls */ 2765 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2766 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2767 (ulp_num * MEM_DESCR_OFFSET); 2768 if (mem_descr->mem_array[0].virtual_address) { 2769 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2770 "BM_%d : hwi_init_async_pdu_ctx" 2771 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 2772 ulp_num, 2773 mem_descr->mem_array[0]. 2774 virtual_address); 2775 } else 2776 beiscsi_log(phba, KERN_WARNING, 2777 BEISCSI_LOG_INIT, 2778 "BM_%d : No Virtual address for ULP : %d\n", 2779 ulp_num); 2780 2781 pasync_ctx->async_header.ring_base = 2782 mem_descr->mem_array[0].virtual_address; 2783 2784 /* setup header buffer handles */ 2785 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2786 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2787 (ulp_num * MEM_DESCR_OFFSET); 2788 if (mem_descr->mem_array[0].virtual_address) { 2789 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2790 "BM_%d : hwi_init_async_pdu_ctx" 2791 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 2792 ulp_num, 2793 mem_descr->mem_array[0]. 2794 virtual_address); 2795 } else 2796 beiscsi_log(phba, KERN_WARNING, 2797 BEISCSI_LOG_INIT, 2798 "BM_%d : No Virtual address for ULP : %d\n", 2799 ulp_num); 2800 2801 pasync_ctx->async_header.handle_base = 2802 mem_descr->mem_array[0].virtual_address; 2803 2804 /* setup data buffer sgls */ 2805 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2806 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 2807 (ulp_num * MEM_DESCR_OFFSET); 2808 if (mem_descr->mem_array[0].virtual_address) { 2809 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2810 "BM_%d : hwi_init_async_pdu_ctx" 2811 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 2812 ulp_num, 2813 mem_descr->mem_array[0]. 2814 virtual_address); 2815 } else 2816 beiscsi_log(phba, KERN_WARNING, 2817 BEISCSI_LOG_INIT, 2818 "BM_%d : No Virtual address for ULP : %d\n", 2819 ulp_num); 2820 2821 pasync_ctx->async_data.ring_base = 2822 mem_descr->mem_array[0].virtual_address; 2823 2824 /* setup data buffer handles */ 2825 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2826 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2827 (ulp_num * MEM_DESCR_OFFSET); 2828 if (!mem_descr->mem_array[0].virtual_address) 2829 beiscsi_log(phba, KERN_WARNING, 2830 BEISCSI_LOG_INIT, 2831 "BM_%d : No Virtual address for ULP : %d\n", 2832 ulp_num); 2833 2834 pasync_ctx->async_data.handle_base = 2835 mem_descr->mem_array[0].virtual_address; 2836 2837 pasync_header_h = 2838 (struct hd_async_handle *) 2839 pasync_ctx->async_header.handle_base; 2840 pasync_data_h = 2841 (struct hd_async_handle *) 2842 pasync_ctx->async_data.handle_base; 2843 2844 /* setup data buffers */ 2845 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2846 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2847 (ulp_num * MEM_DESCR_OFFSET); 2848 if (mem_descr->mem_array[0].virtual_address) { 2849 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2850 "BM_%d : hwi_init_async_pdu_ctx" 2851 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 2852 ulp_num, 2853 mem_descr->mem_array[0]. 2854 virtual_address); 2855 } else 2856 beiscsi_log(phba, KERN_WARNING, 2857 BEISCSI_LOG_INIT, 2858 "BM_%d : No Virtual address for ULP : %d\n", 2859 ulp_num); 2860 2861 idx = 0; 2862 pasync_ctx->async_data.pi = 0; 2863 pasync_ctx->async_data.buffer_size = p->defpdu_data_sz; 2864 pasync_ctx->async_data.va_base = 2865 mem_descr->mem_array[idx].virtual_address; 2866 pasync_ctx->async_data.pa_base.u.a64.address = 2867 mem_descr->mem_array[idx]. 2868 bus_address.u.a64.address; 2869 2870 num_async_data = ((mem_descr->mem_array[idx].size) / 2871 phba->params.defpdu_data_sz); 2872 num_per_mem = 0; 2873 2874 for (index = 0; index < BEISCSI_ASYNC_HDQ_SIZE 2875 (phba, ulp_num); index++) { 2876 pasync_header_h->cri = -1; 2877 pasync_header_h->is_header = 1; 2878 pasync_header_h->index = index; 2879 INIT_LIST_HEAD(&pasync_header_h->link); 2880 pasync_header_h->pbuffer = 2881 (void *)((unsigned long) 2882 (pasync_ctx-> 2883 async_header.va_base) + 2884 (p->defpdu_hdr_sz * index)); 2885 2886 pasync_header_h->pa.u.a64.address = 2887 pasync_ctx->async_header.pa_base.u.a64. 2888 address + (p->defpdu_hdr_sz * index); 2889 2890 pasync_ctx->async_entry[index].header = 2891 pasync_header_h; 2892 pasync_header_h++; 2893 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2894 wq.list); 2895 2896 pasync_data_h->cri = -1; 2897 pasync_data_h->is_header = 0; 2898 pasync_data_h->index = index; 2899 INIT_LIST_HEAD(&pasync_data_h->link); 2900 2901 if (!num_async_data) { 2902 num_per_mem = 0; 2903 idx++; 2904 pasync_ctx->async_data.va_base = 2905 mem_descr->mem_array[idx]. 2906 virtual_address; 2907 pasync_ctx->async_data.pa_base.u. 2908 a64.address = 2909 mem_descr->mem_array[idx]. 2910 bus_address.u.a64.address; 2911 num_async_data = 2912 ((mem_descr->mem_array[idx]. 2913 size) / 2914 phba->params.defpdu_data_sz); 2915 } 2916 pasync_data_h->pbuffer = 2917 (void *)((unsigned long) 2918 (pasync_ctx->async_data.va_base) + 2919 (p->defpdu_data_sz * num_per_mem)); 2920 2921 pasync_data_h->pa.u.a64.address = 2922 pasync_ctx->async_data.pa_base.u.a64. 2923 address + (p->defpdu_data_sz * 2924 num_per_mem); 2925 num_per_mem++; 2926 num_async_data--; 2927 2928 pasync_ctx->async_entry[index].data = 2929 pasync_data_h; 2930 pasync_data_h++; 2931 } 2932 } 2933 } 2934 2935 return 0; 2936 } 2937 2938 static int 2939 be_sgl_create_contiguous(void *virtual_address, 2940 u64 physical_address, u32 length, 2941 struct be_dma_mem *sgl) 2942 { 2943 WARN_ON(!virtual_address); 2944 WARN_ON(!physical_address); 2945 WARN_ON(!length); 2946 WARN_ON(!sgl); 2947 2948 sgl->va = virtual_address; 2949 sgl->dma = (unsigned long)physical_address; 2950 sgl->size = length; 2951 2952 return 0; 2953 } 2954 2955 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 2956 { 2957 memset(sgl, 0, sizeof(*sgl)); 2958 } 2959 2960 static void 2961 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 2962 struct mem_array *pmem, struct be_dma_mem *sgl) 2963 { 2964 if (sgl->va) 2965 be_sgl_destroy_contiguous(sgl); 2966 2967 be_sgl_create_contiguous(pmem->virtual_address, 2968 pmem->bus_address.u.a64.address, 2969 pmem->size, sgl); 2970 } 2971 2972 static void 2973 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 2974 struct mem_array *pmem, struct be_dma_mem *sgl) 2975 { 2976 if (sgl->va) 2977 be_sgl_destroy_contiguous(sgl); 2978 2979 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 2980 pmem->bus_address.u.a64.address, 2981 pmem->size, sgl); 2982 } 2983 2984 static int be_fill_queue(struct be_queue_info *q, 2985 u16 len, u16 entry_size, void *vaddress) 2986 { 2987 struct be_dma_mem *mem = &q->dma_mem; 2988 2989 memset(q, 0, sizeof(*q)); 2990 q->len = len; 2991 q->entry_size = entry_size; 2992 mem->size = len * entry_size; 2993 mem->va = vaddress; 2994 if (!mem->va) 2995 return -ENOMEM; 2996 memset(mem->va, 0, mem->size); 2997 return 0; 2998 } 2999 3000 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3001 struct hwi_context_memory *phwi_context) 3002 { 3003 int ret = -ENOMEM, eq_for_mcc; 3004 unsigned int i, num_eq_pages; 3005 struct be_queue_info *eq; 3006 struct be_dma_mem *mem; 3007 void *eq_vaddress; 3008 dma_addr_t paddr; 3009 3010 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ 3011 sizeof(struct be_eq_entry)); 3012 3013 if (phba->pcidev->msix_enabled) 3014 eq_for_mcc = 1; 3015 else 3016 eq_for_mcc = 0; 3017 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3018 eq = &phwi_context->be_eq[i].q; 3019 mem = &eq->dma_mem; 3020 phwi_context->be_eq[i].phba = phba; 3021 eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev, 3022 num_eq_pages * PAGE_SIZE, 3023 &paddr, GFP_KERNEL); 3024 if (!eq_vaddress) { 3025 ret = -ENOMEM; 3026 goto create_eq_error; 3027 } 3028 3029 mem->va = eq_vaddress; 3030 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3031 sizeof(struct be_eq_entry), eq_vaddress); 3032 if (ret) { 3033 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3034 "BM_%d : be_fill_queue Failed for EQ\n"); 3035 goto create_eq_error; 3036 } 3037 3038 mem->dma = paddr; 3039 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3040 BEISCSI_EQ_DELAY_DEF); 3041 if (ret) { 3042 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3043 "BM_%d : beiscsi_cmd_eq_create" 3044 "Failed for EQ\n"); 3045 goto create_eq_error; 3046 } 3047 3048 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3049 "BM_%d : eqid = %d\n", 3050 phwi_context->be_eq[i].q.id); 3051 } 3052 return 0; 3053 3054 create_eq_error: 3055 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3056 eq = &phwi_context->be_eq[i].q; 3057 mem = &eq->dma_mem; 3058 if (mem->va) 3059 dma_free_coherent(&phba->pcidev->dev, num_eq_pages 3060 * PAGE_SIZE, 3061 mem->va, mem->dma); 3062 } 3063 return ret; 3064 } 3065 3066 static int beiscsi_create_cqs(struct beiscsi_hba *phba, 3067 struct hwi_context_memory *phwi_context) 3068 { 3069 unsigned int i, num_cq_pages; 3070 struct be_queue_info *cq, *eq; 3071 struct be_dma_mem *mem; 3072 struct be_eq_obj *pbe_eq; 3073 void *cq_vaddress; 3074 int ret = -ENOMEM; 3075 dma_addr_t paddr; 3076 3077 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 3078 sizeof(struct sol_cqe)); 3079 3080 for (i = 0; i < phba->num_cpus; i++) { 3081 cq = &phwi_context->be_cq[i]; 3082 eq = &phwi_context->be_eq[i].q; 3083 pbe_eq = &phwi_context->be_eq[i]; 3084 pbe_eq->cq = cq; 3085 pbe_eq->phba = phba; 3086 mem = &cq->dma_mem; 3087 cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev, 3088 num_cq_pages * PAGE_SIZE, 3089 &paddr, GFP_KERNEL); 3090 if (!cq_vaddress) { 3091 ret = -ENOMEM; 3092 goto create_cq_error; 3093 } 3094 3095 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3096 sizeof(struct sol_cqe), cq_vaddress); 3097 if (ret) { 3098 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3099 "BM_%d : be_fill_queue Failed " 3100 "for ISCSI CQ\n"); 3101 goto create_cq_error; 3102 } 3103 3104 mem->dma = paddr; 3105 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 3106 false, 0); 3107 if (ret) { 3108 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3109 "BM_%d : beiscsi_cmd_eq_create" 3110 "Failed for ISCSI CQ\n"); 3111 goto create_cq_error; 3112 } 3113 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3114 "BM_%d : iscsi cq_id is %d for eq_id %d\n" 3115 "iSCSI CQ CREATED\n", cq->id, eq->id); 3116 } 3117 return 0; 3118 3119 create_cq_error: 3120 for (i = 0; i < phba->num_cpus; i++) { 3121 cq = &phwi_context->be_cq[i]; 3122 mem = &cq->dma_mem; 3123 if (mem->va) 3124 dma_free_coherent(&phba->pcidev->dev, num_cq_pages 3125 * PAGE_SIZE, 3126 mem->va, mem->dma); 3127 } 3128 return ret; 3129 } 3130 3131 static int 3132 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3133 struct hwi_context_memory *phwi_context, 3134 struct hwi_controller *phwi_ctrlr, 3135 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3136 { 3137 unsigned int idx; 3138 int ret; 3139 struct be_queue_info *dq, *cq; 3140 struct be_dma_mem *mem; 3141 struct be_mem_descriptor *mem_descr; 3142 void *dq_vaddress; 3143 3144 idx = 0; 3145 dq = &phwi_context->be_def_hdrq[ulp_num]; 3146 cq = &phwi_context->be_cq[0]; 3147 mem = &dq->dma_mem; 3148 mem_descr = phba->init_mem; 3149 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 3150 (ulp_num * MEM_DESCR_OFFSET); 3151 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3152 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3153 sizeof(struct phys_addr), 3154 sizeof(struct phys_addr), dq_vaddress); 3155 if (ret) { 3156 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3157 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", 3158 ulp_num); 3159 3160 return ret; 3161 } 3162 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3163 bus_address.u.a64.address; 3164 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3165 def_pdu_ring_sz, 3166 phba->params.defpdu_hdr_sz, 3167 BEISCSI_DEFQ_HDR, ulp_num); 3168 if (ret) { 3169 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3170 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3171 ulp_num); 3172 3173 return ret; 3174 } 3175 3176 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3177 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3178 ulp_num, 3179 phwi_context->be_def_hdrq[ulp_num].id); 3180 return 0; 3181 } 3182 3183 static int 3184 beiscsi_create_def_data(struct beiscsi_hba *phba, 3185 struct hwi_context_memory *phwi_context, 3186 struct hwi_controller *phwi_ctrlr, 3187 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3188 { 3189 unsigned int idx; 3190 int ret; 3191 struct be_queue_info *dataq, *cq; 3192 struct be_dma_mem *mem; 3193 struct be_mem_descriptor *mem_descr; 3194 void *dq_vaddress; 3195 3196 idx = 0; 3197 dataq = &phwi_context->be_def_dataq[ulp_num]; 3198 cq = &phwi_context->be_cq[0]; 3199 mem = &dataq->dma_mem; 3200 mem_descr = phba->init_mem; 3201 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3202 (ulp_num * MEM_DESCR_OFFSET); 3203 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3204 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3205 sizeof(struct phys_addr), 3206 sizeof(struct phys_addr), dq_vaddress); 3207 if (ret) { 3208 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3209 "BM_%d : be_fill_queue Failed for DEF PDU " 3210 "DATA on ULP : %d\n", 3211 ulp_num); 3212 3213 return ret; 3214 } 3215 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3216 bus_address.u.a64.address; 3217 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3218 def_pdu_ring_sz, 3219 phba->params.defpdu_data_sz, 3220 BEISCSI_DEFQ_DATA, ulp_num); 3221 if (ret) { 3222 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3223 "BM_%d be_cmd_create_default_pdu_queue" 3224 " Failed for DEF PDU DATA on ULP : %d\n", 3225 ulp_num); 3226 return ret; 3227 } 3228 3229 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3230 "BM_%d : iscsi def data id on ULP : %d is %d\n", 3231 ulp_num, 3232 phwi_context->be_def_dataq[ulp_num].id); 3233 3234 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3235 "BM_%d : DEFAULT PDU DATA RING CREATED" 3236 "on ULP : %d\n", ulp_num); 3237 return 0; 3238 } 3239 3240 3241 static int 3242 beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3243 { 3244 struct be_mem_descriptor *mem_descr; 3245 struct mem_array *pm_arr; 3246 struct be_dma_mem sgl; 3247 int status, ulp_num; 3248 3249 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3250 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3251 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3252 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3253 (ulp_num * MEM_DESCR_OFFSET); 3254 pm_arr = mem_descr->mem_array; 3255 3256 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3257 status = be_cmd_iscsi_post_template_hdr( 3258 &phba->ctrl, &sgl); 3259 3260 if (status != 0) { 3261 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3262 "BM_%d : Post Template HDR Failed for" 3263 "ULP_%d\n", ulp_num); 3264 return status; 3265 } 3266 3267 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3268 "BM_%d : Template HDR Pages Posted for" 3269 "ULP_%d\n", ulp_num); 3270 } 3271 } 3272 return 0; 3273 } 3274 3275 static int 3276 beiscsi_post_pages(struct beiscsi_hba *phba) 3277 { 3278 struct be_mem_descriptor *mem_descr; 3279 struct mem_array *pm_arr; 3280 unsigned int page_offset, i; 3281 struct be_dma_mem sgl; 3282 int status, ulp_num = 0; 3283 3284 mem_descr = phba->init_mem; 3285 mem_descr += HWI_MEM_SGE; 3286 pm_arr = mem_descr->mem_array; 3287 3288 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3289 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3290 break; 3291 3292 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3293 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3294 for (i = 0; i < mem_descr->num_elements; i++) { 3295 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3296 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3297 page_offset, 3298 (pm_arr->size / PAGE_SIZE)); 3299 page_offset += pm_arr->size / PAGE_SIZE; 3300 if (status != 0) { 3301 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3302 "BM_%d : post sgl failed.\n"); 3303 return status; 3304 } 3305 pm_arr++; 3306 } 3307 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3308 "BM_%d : POSTED PAGES\n"); 3309 return 0; 3310 } 3311 3312 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3313 { 3314 struct be_dma_mem *mem = &q->dma_mem; 3315 if (mem->va) { 3316 dma_free_coherent(&phba->pcidev->dev, mem->size, 3317 mem->va, mem->dma); 3318 mem->va = NULL; 3319 } 3320 } 3321 3322 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3323 u16 len, u16 entry_size) 3324 { 3325 struct be_dma_mem *mem = &q->dma_mem; 3326 3327 memset(q, 0, sizeof(*q)); 3328 q->len = len; 3329 q->entry_size = entry_size; 3330 mem->size = len * entry_size; 3331 mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma, 3332 GFP_KERNEL); 3333 if (!mem->va) 3334 return -ENOMEM; 3335 return 0; 3336 } 3337 3338 static int 3339 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3340 struct hwi_context_memory *phwi_context, 3341 struct hwi_controller *phwi_ctrlr) 3342 { 3343 unsigned int num_wrb_rings; 3344 u64 pa_addr_lo; 3345 unsigned int idx, num, i, ulp_num; 3346 struct mem_array *pwrb_arr; 3347 void *wrb_vaddr; 3348 struct be_dma_mem sgl; 3349 struct be_mem_descriptor *mem_descr; 3350 struct hwi_wrb_context *pwrb_context; 3351 int status; 3352 uint8_t ulp_count = 0, ulp_base_num = 0; 3353 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3354 3355 idx = 0; 3356 mem_descr = phba->init_mem; 3357 mem_descr += HWI_MEM_WRB; 3358 pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl, 3359 sizeof(*pwrb_arr), 3360 GFP_KERNEL); 3361 if (!pwrb_arr) { 3362 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3363 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3364 return -ENOMEM; 3365 } 3366 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3367 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3368 num_wrb_rings = mem_descr->mem_array[idx].size / 3369 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3370 3371 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3372 if (num_wrb_rings) { 3373 pwrb_arr[num].virtual_address = wrb_vaddr; 3374 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3375 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3376 sizeof(struct iscsi_wrb); 3377 wrb_vaddr += pwrb_arr[num].size; 3378 pa_addr_lo += pwrb_arr[num].size; 3379 num_wrb_rings--; 3380 } else { 3381 idx++; 3382 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3383 pa_addr_lo = mem_descr->mem_array[idx].\ 3384 bus_address.u.a64.address; 3385 num_wrb_rings = mem_descr->mem_array[idx].size / 3386 (phba->params.wrbs_per_cxn * 3387 sizeof(struct iscsi_wrb)); 3388 pwrb_arr[num].virtual_address = wrb_vaddr; 3389 pwrb_arr[num].bus_address.u.a64.address\ 3390 = pa_addr_lo; 3391 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3392 sizeof(struct iscsi_wrb); 3393 wrb_vaddr += pwrb_arr[num].size; 3394 pa_addr_lo += pwrb_arr[num].size; 3395 num_wrb_rings--; 3396 } 3397 } 3398 3399 /* Get the ULP Count */ 3400 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3401 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3402 ulp_count++; 3403 ulp_base_num = ulp_num; 3404 cid_count_ulp[ulp_num] = 3405 BEISCSI_GET_CID_COUNT(phba, ulp_num); 3406 } 3407 3408 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3409 if (ulp_count > 1) { 3410 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3411 3412 if (!cid_count_ulp[ulp_base_num]) 3413 ulp_base_num = (ulp_base_num + 1) % 3414 BEISCSI_ULP_COUNT; 3415 3416 cid_count_ulp[ulp_base_num]--; 3417 } 3418 3419 3420 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3421 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3422 &phwi_context->be_wrbq[i], 3423 &phwi_ctrlr->wrb_context[i], 3424 ulp_base_num); 3425 if (status != 0) { 3426 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3427 "BM_%d : wrbq create failed."); 3428 kfree(pwrb_arr); 3429 return status; 3430 } 3431 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3432 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3433 } 3434 kfree(pwrb_arr); 3435 return 0; 3436 } 3437 3438 static void free_wrb_handles(struct beiscsi_hba *phba) 3439 { 3440 unsigned int index; 3441 struct hwi_controller *phwi_ctrlr; 3442 struct hwi_wrb_context *pwrb_context; 3443 3444 phwi_ctrlr = phba->phwi_ctrlr; 3445 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 3446 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3447 kfree(pwrb_context->pwrb_handle_base); 3448 kfree(pwrb_context->pwrb_handle_basestd); 3449 } 3450 } 3451 3452 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3453 { 3454 struct be_ctrl_info *ctrl = &phba->ctrl; 3455 struct be_dma_mem *ptag_mem; 3456 struct be_queue_info *q; 3457 int i, tag; 3458 3459 q = &phba->ctrl.mcc_obj.q; 3460 for (i = 0; i < MAX_MCC_CMD; i++) { 3461 tag = i + 1; 3462 if (!test_bit(MCC_TAG_STATE_RUNNING, 3463 &ctrl->ptag_state[tag].tag_state)) 3464 continue; 3465 3466 if (test_bit(MCC_TAG_STATE_TIMEOUT, 3467 &ctrl->ptag_state[tag].tag_state)) { 3468 ptag_mem = &ctrl->ptag_state[tag].tag_mem_state; 3469 if (ptag_mem->size) { 3470 dma_free_coherent(&ctrl->pdev->dev, 3471 ptag_mem->size, 3472 ptag_mem->va, 3473 ptag_mem->dma); 3474 ptag_mem->size = 0; 3475 } 3476 continue; 3477 } 3478 /** 3479 * If MCC is still active and waiting then wake up the process. 3480 * We are here only because port is going offline. The process 3481 * sees that (BEISCSI_HBA_ONLINE is cleared) and EIO error is 3482 * returned for the operation and allocated memory cleaned up. 3483 */ 3484 if (waitqueue_active(&ctrl->mcc_wait[tag])) { 3485 ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED; 3486 ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK; 3487 wake_up_interruptible(&ctrl->mcc_wait[tag]); 3488 /* 3489 * Control tag info gets reinitialized in enable 3490 * so wait for the process to clear running state. 3491 */ 3492 while (test_bit(MCC_TAG_STATE_RUNNING, 3493 &ctrl->ptag_state[tag].tag_state)) 3494 schedule_timeout_uninterruptible(HZ); 3495 } 3496 /** 3497 * For MCC with tag_states MCC_TAG_STATE_ASYNC and 3498 * MCC_TAG_STATE_IGNORE nothing needs to done. 3499 */ 3500 } 3501 if (q->created) { 3502 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3503 be_queue_free(phba, q); 3504 } 3505 3506 q = &phba->ctrl.mcc_obj.cq; 3507 if (q->created) { 3508 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3509 be_queue_free(phba, q); 3510 } 3511 } 3512 3513 static int be_mcc_queues_create(struct beiscsi_hba *phba, 3514 struct hwi_context_memory *phwi_context) 3515 { 3516 struct be_queue_info *q, *cq; 3517 struct be_ctrl_info *ctrl = &phba->ctrl; 3518 3519 /* Alloc MCC compl queue */ 3520 cq = &phba->ctrl.mcc_obj.cq; 3521 if (be_queue_alloc(phba, cq, MCC_CQ_LEN, 3522 sizeof(struct be_mcc_compl))) 3523 goto err; 3524 /* Ask BE to create MCC compl queue; */ 3525 if (phba->pcidev->msix_enabled) { 3526 if (beiscsi_cmd_cq_create(ctrl, cq, 3527 &phwi_context->be_eq[phba->num_cpus].q, 3528 false, true, 0)) 3529 goto mcc_cq_free; 3530 } else { 3531 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3532 false, true, 0)) 3533 goto mcc_cq_free; 3534 } 3535 3536 /* Alloc MCC queue */ 3537 q = &phba->ctrl.mcc_obj.q; 3538 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) 3539 goto mcc_cq_destroy; 3540 3541 /* Ask BE to create MCC queue */ 3542 if (beiscsi_cmd_mccq_create(phba, q, cq)) 3543 goto mcc_q_free; 3544 3545 return 0; 3546 3547 mcc_q_free: 3548 be_queue_free(phba, q); 3549 mcc_cq_destroy: 3550 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); 3551 mcc_cq_free: 3552 be_queue_free(phba, cq); 3553 err: 3554 return -ENOMEM; 3555 } 3556 3557 static void be2iscsi_enable_msix(struct beiscsi_hba *phba) 3558 { 3559 int nvec = 1; 3560 3561 switch (phba->generation) { 3562 case BE_GEN2: 3563 case BE_GEN3: 3564 nvec = BEISCSI_MAX_NUM_CPUS + 1; 3565 break; 3566 case BE_GEN4: 3567 nvec = phba->fw_config.eqid_count; 3568 break; 3569 default: 3570 nvec = 2; 3571 break; 3572 } 3573 3574 /* if eqid_count == 1 fall back to INTX */ 3575 if (enable_msix && nvec > 1) { 3576 const struct irq_affinity desc = { .post_vectors = 1 }; 3577 3578 if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, 3579 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { 3580 phba->num_cpus = nvec - 1; 3581 return; 3582 } 3583 } 3584 3585 phba->num_cpus = 1; 3586 } 3587 3588 static void hwi_purge_eq(struct beiscsi_hba *phba) 3589 { 3590 struct hwi_controller *phwi_ctrlr; 3591 struct hwi_context_memory *phwi_context; 3592 struct be_queue_info *eq; 3593 struct be_eq_entry *eqe = NULL; 3594 int i, eq_msix; 3595 unsigned int num_processed; 3596 3597 if (beiscsi_hba_in_error(phba)) 3598 return; 3599 3600 phwi_ctrlr = phba->phwi_ctrlr; 3601 phwi_context = phwi_ctrlr->phwi_ctxt; 3602 if (phba->pcidev->msix_enabled) 3603 eq_msix = 1; 3604 else 3605 eq_msix = 0; 3606 3607 for (i = 0; i < (phba->num_cpus + eq_msix); i++) { 3608 eq = &phwi_context->be_eq[i].q; 3609 eqe = queue_tail_node(eq); 3610 num_processed = 0; 3611 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 3612 & EQE_VALID_MASK) { 3613 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 3614 queue_tail_inc(eq); 3615 eqe = queue_tail_node(eq); 3616 num_processed++; 3617 } 3618 3619 if (num_processed) 3620 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); 3621 } 3622 } 3623 3624 static void hwi_cleanup_port(struct beiscsi_hba *phba) 3625 { 3626 struct be_queue_info *q; 3627 struct be_ctrl_info *ctrl = &phba->ctrl; 3628 struct hwi_controller *phwi_ctrlr; 3629 struct hwi_context_memory *phwi_context; 3630 int i, eq_for_mcc, ulp_num; 3631 3632 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3633 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3634 beiscsi_cmd_iscsi_cleanup(phba, ulp_num); 3635 3636 /** 3637 * Purge all EQ entries that may have been left out. This is to 3638 * workaround a problem we've seen occasionally where driver gets an 3639 * interrupt with EQ entry bit set after stopping the controller. 3640 */ 3641 hwi_purge_eq(phba); 3642 3643 phwi_ctrlr = phba->phwi_ctrlr; 3644 phwi_context = phwi_ctrlr->phwi_ctxt; 3645 3646 be_cmd_iscsi_remove_template_hdr(ctrl); 3647 3648 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3649 q = &phwi_context->be_wrbq[i]; 3650 if (q->created) 3651 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3652 } 3653 kfree(phwi_context->be_wrbq); 3654 free_wrb_handles(phba); 3655 3656 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3657 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3658 3659 q = &phwi_context->be_def_hdrq[ulp_num]; 3660 if (q->created) 3661 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3662 3663 q = &phwi_context->be_def_dataq[ulp_num]; 3664 if (q->created) 3665 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3666 } 3667 } 3668 3669 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3670 3671 for (i = 0; i < (phba->num_cpus); i++) { 3672 q = &phwi_context->be_cq[i]; 3673 if (q->created) { 3674 be_queue_free(phba, q); 3675 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3676 } 3677 } 3678 3679 be_mcc_queues_destroy(phba); 3680 if (phba->pcidev->msix_enabled) 3681 eq_for_mcc = 1; 3682 else 3683 eq_for_mcc = 0; 3684 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3685 q = &phwi_context->be_eq[i].q; 3686 if (q->created) { 3687 be_queue_free(phba, q); 3688 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3689 } 3690 } 3691 /* this ensures complete FW cleanup */ 3692 beiscsi_cmd_function_reset(phba); 3693 /* last communication, indicate driver is unloading */ 3694 beiscsi_cmd_special_wrb(&phba->ctrl, 0); 3695 } 3696 3697 static int hwi_init_port(struct beiscsi_hba *phba) 3698 { 3699 struct hwi_controller *phwi_ctrlr; 3700 struct hwi_context_memory *phwi_context; 3701 unsigned int def_pdu_ring_sz; 3702 struct be_ctrl_info *ctrl = &phba->ctrl; 3703 int status, ulp_num; 3704 u16 nbufs; 3705 3706 phwi_ctrlr = phba->phwi_ctrlr; 3707 phwi_context = phwi_ctrlr->phwi_ctxt; 3708 /* set port optic state to unknown */ 3709 phba->optic_state = 0xff; 3710 3711 status = beiscsi_create_eqs(phba, phwi_context); 3712 if (status != 0) { 3713 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3714 "BM_%d : EQ not created\n"); 3715 goto error; 3716 } 3717 3718 status = be_mcc_queues_create(phba, phwi_context); 3719 if (status != 0) 3720 goto error; 3721 3722 status = beiscsi_check_supported_fw(ctrl, phba); 3723 if (status != 0) { 3724 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3725 "BM_%d : Unsupported fw version\n"); 3726 goto error; 3727 } 3728 3729 status = beiscsi_create_cqs(phba, phwi_context); 3730 if (status != 0) { 3731 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3732 "BM_%d : CQ not created\n"); 3733 goto error; 3734 } 3735 3736 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3737 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3738 nbufs = phwi_context->pasync_ctx[ulp_num]->num_entries; 3739 def_pdu_ring_sz = nbufs * sizeof(struct phys_addr); 3740 3741 status = beiscsi_create_def_hdr(phba, phwi_context, 3742 phwi_ctrlr, 3743 def_pdu_ring_sz, 3744 ulp_num); 3745 if (status != 0) { 3746 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3747 "BM_%d : Default Header not created for ULP : %d\n", 3748 ulp_num); 3749 goto error; 3750 } 3751 3752 status = beiscsi_create_def_data(phba, phwi_context, 3753 phwi_ctrlr, 3754 def_pdu_ring_sz, 3755 ulp_num); 3756 if (status != 0) { 3757 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3758 "BM_%d : Default Data not created for ULP : %d\n", 3759 ulp_num); 3760 goto error; 3761 } 3762 /** 3763 * Now that the default PDU rings have been created, 3764 * let EP know about it. 3765 */ 3766 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR, 3767 ulp_num, nbufs); 3768 beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA, 3769 ulp_num, nbufs); 3770 } 3771 } 3772 3773 status = beiscsi_post_pages(phba); 3774 if (status != 0) { 3775 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3776 "BM_%d : Post SGL Pages Failed\n"); 3777 goto error; 3778 } 3779 3780 status = beiscsi_post_template_hdr(phba); 3781 if (status != 0) { 3782 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3783 "BM_%d : Template HDR Posting for CXN Failed\n"); 3784 } 3785 3786 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3787 if (status != 0) { 3788 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3789 "BM_%d : WRB Rings not created\n"); 3790 goto error; 3791 } 3792 3793 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3794 uint16_t async_arr_idx = 0; 3795 3796 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3797 uint16_t cri = 0; 3798 struct hd_async_context *pasync_ctx; 3799 3800 pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3801 phwi_ctrlr, ulp_num); 3802 for (cri = 0; cri < 3803 phba->params.cxns_per_ctrl; cri++) { 3804 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI 3805 (phwi_ctrlr, cri)) 3806 pasync_ctx->cid_to_async_cri_map[ 3807 phwi_ctrlr->wrb_context[cri].cid] = 3808 async_arr_idx++; 3809 } 3810 } 3811 } 3812 3813 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3814 "BM_%d : hwi_init_port success\n"); 3815 return 0; 3816 3817 error: 3818 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3819 "BM_%d : hwi_init_port failed"); 3820 hwi_cleanup_port(phba); 3821 return status; 3822 } 3823 3824 static int hwi_init_controller(struct beiscsi_hba *phba) 3825 { 3826 struct hwi_controller *phwi_ctrlr; 3827 3828 phwi_ctrlr = phba->phwi_ctrlr; 3829 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3830 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3831 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3832 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3833 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3834 phwi_ctrlr->phwi_ctxt); 3835 } else { 3836 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3837 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3838 "than one element.Failing to load\n"); 3839 return -ENOMEM; 3840 } 3841 3842 iscsi_init_global_templates(phba); 3843 if (beiscsi_init_wrb_handle(phba)) 3844 return -ENOMEM; 3845 3846 if (hwi_init_async_pdu_ctx(phba)) { 3847 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3848 "BM_%d : hwi_init_async_pdu_ctx failed\n"); 3849 return -ENOMEM; 3850 } 3851 3852 if (hwi_init_port(phba) != 0) { 3853 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3854 "BM_%d : hwi_init_controller failed\n"); 3855 3856 return -ENOMEM; 3857 } 3858 return 0; 3859 } 3860 3861 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3862 { 3863 struct be_mem_descriptor *mem_descr; 3864 int i, j; 3865 3866 mem_descr = phba->init_mem; 3867 i = 0; 3868 j = 0; 3869 for (i = 0; i < SE_MEM_MAX; i++) { 3870 for (j = mem_descr->num_elements; j > 0; j--) { 3871 dma_free_coherent(&phba->pcidev->dev, 3872 mem_descr->mem_array[j - 1].size, 3873 mem_descr->mem_array[j - 1].virtual_address, 3874 (unsigned long)mem_descr->mem_array[j - 1]. 3875 bus_address.u.a64.address); 3876 } 3877 3878 kfree(mem_descr->mem_array); 3879 mem_descr++; 3880 } 3881 kfree(phba->init_mem); 3882 kfree(phba->phwi_ctrlr->wrb_context); 3883 kfree(phba->phwi_ctrlr); 3884 } 3885 3886 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3887 { 3888 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3889 struct sgl_handle *psgl_handle; 3890 struct iscsi_sge *pfrag; 3891 unsigned int arr_index, i, idx; 3892 unsigned int ulp_icd_start, ulp_num = 0; 3893 3894 phba->io_sgl_hndl_avbl = 0; 3895 phba->eh_sgl_hndl_avbl = 0; 3896 3897 mem_descr_sglh = phba->init_mem; 3898 mem_descr_sglh += HWI_MEM_SGLH; 3899 if (1 == mem_descr_sglh->num_elements) { 3900 phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl, 3901 sizeof(struct sgl_handle *), 3902 GFP_KERNEL); 3903 if (!phba->io_sgl_hndl_base) { 3904 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3905 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3906 return -ENOMEM; 3907 } 3908 phba->eh_sgl_hndl_base = 3909 kcalloc(phba->params.icds_per_ctrl - 3910 phba->params.ios_per_ctrl, 3911 sizeof(struct sgl_handle *), GFP_KERNEL); 3912 if (!phba->eh_sgl_hndl_base) { 3913 kfree(phba->io_sgl_hndl_base); 3914 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3915 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3916 return -ENOMEM; 3917 } 3918 } else { 3919 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3920 "BM_%d : HWI_MEM_SGLH is more than one element." 3921 "Failing to load\n"); 3922 return -ENOMEM; 3923 } 3924 3925 arr_index = 0; 3926 idx = 0; 3927 while (idx < mem_descr_sglh->num_elements) { 3928 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3929 3930 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3931 sizeof(struct sgl_handle)); i++) { 3932 if (arr_index < phba->params.ios_per_ctrl) { 3933 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3934 phba->io_sgl_hndl_avbl++; 3935 arr_index++; 3936 } else { 3937 phba->eh_sgl_hndl_base[arr_index - 3938 phba->params.ios_per_ctrl] = 3939 psgl_handle; 3940 arr_index++; 3941 phba->eh_sgl_hndl_avbl++; 3942 } 3943 psgl_handle++; 3944 } 3945 idx++; 3946 } 3947 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3948 "BM_%d : phba->io_sgl_hndl_avbl=%d" 3949 "phba->eh_sgl_hndl_avbl=%d\n", 3950 phba->io_sgl_hndl_avbl, 3951 phba->eh_sgl_hndl_avbl); 3952 3953 mem_descr_sg = phba->init_mem; 3954 mem_descr_sg += HWI_MEM_SGE; 3955 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3956 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 3957 mem_descr_sg->num_elements); 3958 3959 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3960 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3961 break; 3962 3963 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 3964 3965 arr_index = 0; 3966 idx = 0; 3967 while (idx < mem_descr_sg->num_elements) { 3968 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 3969 3970 for (i = 0; 3971 i < (mem_descr_sg->mem_array[idx].size) / 3972 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 3973 i++) { 3974 if (arr_index < phba->params.ios_per_ctrl) 3975 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 3976 else 3977 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 3978 phba->params.ios_per_ctrl]; 3979 psgl_handle->pfrag = pfrag; 3980 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 3981 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3982 pfrag += phba->params.num_sge_per_io; 3983 psgl_handle->sgl_index = ulp_icd_start + arr_index++; 3984 } 3985 idx++; 3986 } 3987 phba->io_sgl_free_index = 0; 3988 phba->io_sgl_alloc_index = 0; 3989 phba->eh_sgl_free_index = 0; 3990 phba->eh_sgl_alloc_index = 0; 3991 return 0; 3992 } 3993 3994 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 3995 { 3996 int ret; 3997 uint16_t i, ulp_num; 3998 struct ulp_cid_info *ptr_cid_info = NULL; 3999 4000 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4001 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4002 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), 4003 GFP_KERNEL); 4004 4005 if (!ptr_cid_info) { 4006 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4007 "BM_%d : Failed to allocate memory" 4008 "for ULP_CID_INFO for ULP : %d\n", 4009 ulp_num); 4010 ret = -ENOMEM; 4011 goto free_memory; 4012 4013 } 4014 4015 /* Allocate memory for CID array */ 4016 ptr_cid_info->cid_array = 4017 kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num), 4018 sizeof(*ptr_cid_info->cid_array), 4019 GFP_KERNEL); 4020 if (!ptr_cid_info->cid_array) { 4021 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4022 "BM_%d : Failed to allocate memory" 4023 "for CID_ARRAY for ULP : %d\n", 4024 ulp_num); 4025 kfree(ptr_cid_info); 4026 ptr_cid_info = NULL; 4027 ret = -ENOMEM; 4028 4029 goto free_memory; 4030 } 4031 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( 4032 phba, ulp_num); 4033 4034 /* Save the cid_info_array ptr */ 4035 phba->cid_array_info[ulp_num] = ptr_cid_info; 4036 } 4037 } 4038 phba->ep_array = kcalloc(phba->params.cxns_per_ctrl, 4039 sizeof(struct iscsi_endpoint *), 4040 GFP_KERNEL); 4041 if (!phba->ep_array) { 4042 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4043 "BM_%d : Failed to allocate memory in " 4044 "hba_setup_cid_tbls\n"); 4045 ret = -ENOMEM; 4046 4047 goto free_memory; 4048 } 4049 4050 phba->conn_table = kcalloc(phba->params.cxns_per_ctrl, 4051 sizeof(struct beiscsi_conn *), 4052 GFP_KERNEL); 4053 if (!phba->conn_table) { 4054 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4055 "BM_%d : Failed to allocate memory in" 4056 "hba_setup_cid_tbls\n"); 4057 4058 kfree(phba->ep_array); 4059 phba->ep_array = NULL; 4060 ret = -ENOMEM; 4061 4062 goto free_memory; 4063 } 4064 4065 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4066 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4067 4068 ptr_cid_info = phba->cid_array_info[ulp_num]; 4069 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 4070 phba->phwi_ctrlr->wrb_context[i].cid; 4071 4072 } 4073 4074 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4075 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4076 ptr_cid_info = phba->cid_array_info[ulp_num]; 4077 4078 ptr_cid_info->cid_alloc = 0; 4079 ptr_cid_info->cid_free = 0; 4080 } 4081 } 4082 return 0; 4083 4084 free_memory: 4085 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4086 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4087 ptr_cid_info = phba->cid_array_info[ulp_num]; 4088 4089 if (ptr_cid_info) { 4090 kfree(ptr_cid_info->cid_array); 4091 kfree(ptr_cid_info); 4092 phba->cid_array_info[ulp_num] = NULL; 4093 } 4094 } 4095 } 4096 4097 return ret; 4098 } 4099 4100 static void hwi_enable_intr(struct beiscsi_hba *phba) 4101 { 4102 struct be_ctrl_info *ctrl = &phba->ctrl; 4103 struct hwi_controller *phwi_ctrlr; 4104 struct hwi_context_memory *phwi_context; 4105 struct be_queue_info *eq; 4106 u8 __iomem *addr; 4107 u32 reg, i; 4108 u32 enabled; 4109 4110 phwi_ctrlr = phba->phwi_ctrlr; 4111 phwi_context = phwi_ctrlr->phwi_ctxt; 4112 4113 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 4114 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 4115 reg = ioread32(addr); 4116 4117 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4118 if (!enabled) { 4119 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4120 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4121 "BM_%d : reg =x%08x addr=%p\n", reg, addr); 4122 iowrite32(reg, addr); 4123 } 4124 4125 if (!phba->pcidev->msix_enabled) { 4126 eq = &phwi_context->be_eq[0].q; 4127 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4128 "BM_%d : eq->id=%d\n", eq->id); 4129 4130 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4131 } else { 4132 for (i = 0; i <= phba->num_cpus; i++) { 4133 eq = &phwi_context->be_eq[i].q; 4134 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4135 "BM_%d : eq->id=%d\n", eq->id); 4136 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4137 } 4138 } 4139 } 4140 4141 static void hwi_disable_intr(struct beiscsi_hba *phba) 4142 { 4143 struct be_ctrl_info *ctrl = &phba->ctrl; 4144 4145 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 4146 u32 reg = ioread32(addr); 4147 4148 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4149 if (enabled) { 4150 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4151 iowrite32(reg, addr); 4152 } else 4153 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4154 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 4155 } 4156 4157 static int beiscsi_init_port(struct beiscsi_hba *phba) 4158 { 4159 int ret; 4160 4161 ret = hwi_init_controller(phba); 4162 if (ret < 0) { 4163 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4164 "BM_%d : init controller failed\n"); 4165 return ret; 4166 } 4167 ret = beiscsi_init_sgl_handle(phba); 4168 if (ret < 0) { 4169 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4170 "BM_%d : init sgl handles failed\n"); 4171 goto cleanup_port; 4172 } 4173 4174 ret = hba_setup_cid_tbls(phba); 4175 if (ret < 0) { 4176 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4177 "BM_%d : setup CID table failed\n"); 4178 kfree(phba->io_sgl_hndl_base); 4179 kfree(phba->eh_sgl_hndl_base); 4180 goto cleanup_port; 4181 } 4182 return ret; 4183 4184 cleanup_port: 4185 hwi_cleanup_port(phba); 4186 return ret; 4187 } 4188 4189 static void beiscsi_cleanup_port(struct beiscsi_hba *phba) 4190 { 4191 struct ulp_cid_info *ptr_cid_info = NULL; 4192 int ulp_num; 4193 4194 kfree(phba->io_sgl_hndl_base); 4195 kfree(phba->eh_sgl_hndl_base); 4196 kfree(phba->ep_array); 4197 kfree(phba->conn_table); 4198 4199 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4200 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4201 ptr_cid_info = phba->cid_array_info[ulp_num]; 4202 4203 if (ptr_cid_info) { 4204 kfree(ptr_cid_info->cid_array); 4205 kfree(ptr_cid_info); 4206 phba->cid_array_info[ulp_num] = NULL; 4207 } 4208 } 4209 } 4210 } 4211 4212 /** 4213 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources 4214 * @beiscsi_conn: ptr to the conn to be cleaned up 4215 * @task: ptr to iscsi_task resource to be freed. 4216 * 4217 * Free driver mgmt resources binded to CXN. 4218 **/ 4219 void 4220 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4221 struct iscsi_task *task) 4222 { 4223 struct beiscsi_io_task *io_task; 4224 struct beiscsi_hba *phba = beiscsi_conn->phba; 4225 struct hwi_wrb_context *pwrb_context; 4226 struct hwi_controller *phwi_ctrlr; 4227 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4228 beiscsi_conn->beiscsi_conn_cid); 4229 4230 phwi_ctrlr = phba->phwi_ctrlr; 4231 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4232 4233 io_task = task->dd_data; 4234 4235 if (io_task->pwrb_handle) { 4236 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4237 io_task->pwrb_handle = NULL; 4238 } 4239 4240 if (io_task->psgl_handle) { 4241 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4242 io_task->psgl_handle = NULL; 4243 } 4244 4245 if (io_task->mtask_addr) { 4246 dma_unmap_single(&phba->pcidev->dev, 4247 io_task->mtask_addr, 4248 io_task->mtask_data_count, 4249 DMA_TO_DEVICE); 4250 io_task->mtask_addr = 0; 4251 } 4252 } 4253 4254 /** 4255 * beiscsi_cleanup_task()- Free driver resources of the task 4256 * @task: ptr to the iscsi task 4257 * 4258 **/ 4259 static void beiscsi_cleanup_task(struct iscsi_task *task) 4260 { 4261 struct beiscsi_io_task *io_task = task->dd_data; 4262 struct iscsi_conn *conn = task->conn; 4263 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4264 struct beiscsi_hba *phba = beiscsi_conn->phba; 4265 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4266 struct hwi_wrb_context *pwrb_context; 4267 struct hwi_controller *phwi_ctrlr; 4268 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4269 beiscsi_conn->beiscsi_conn_cid); 4270 4271 phwi_ctrlr = phba->phwi_ctrlr; 4272 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4273 4274 if (io_task->cmd_bhs) { 4275 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4276 io_task->bhs_pa.u.a64.address); 4277 io_task->cmd_bhs = NULL; 4278 task->hdr = NULL; 4279 } 4280 4281 if (task->sc) { 4282 if (io_task->pwrb_handle) { 4283 free_wrb_handle(phba, pwrb_context, 4284 io_task->pwrb_handle); 4285 io_task->pwrb_handle = NULL; 4286 } 4287 4288 if (io_task->psgl_handle) { 4289 free_io_sgl_handle(phba, io_task->psgl_handle); 4290 io_task->psgl_handle = NULL; 4291 } 4292 4293 if (io_task->scsi_cmnd) { 4294 if (io_task->num_sg) 4295 scsi_dma_unmap(io_task->scsi_cmnd); 4296 io_task->scsi_cmnd = NULL; 4297 } 4298 } else { 4299 if (!beiscsi_conn->login_in_progress) 4300 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4301 } 4302 } 4303 4304 void 4305 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4306 struct beiscsi_offload_params *params) 4307 { 4308 struct wrb_handle *pwrb_handle; 4309 struct hwi_wrb_context *pwrb_context = NULL; 4310 struct beiscsi_hba *phba = beiscsi_conn->phba; 4311 struct iscsi_task *task = beiscsi_conn->task; 4312 struct iscsi_session *session = task->conn->session; 4313 u32 doorbell = 0; 4314 4315 /* 4316 * We can always use 0 here because it is reserved by libiscsi for 4317 * login/startup related tasks. 4318 */ 4319 beiscsi_conn->login_in_progress = 0; 4320 spin_lock_bh(&session->back_lock); 4321 beiscsi_cleanup_task(task); 4322 spin_unlock_bh(&session->back_lock); 4323 4324 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid, 4325 &pwrb_context); 4326 4327 /* Check for the adapter family */ 4328 if (is_chip_be2_be3r(phba)) 4329 beiscsi_offload_cxn_v0(params, pwrb_handle, 4330 phba->init_mem, 4331 pwrb_context); 4332 else 4333 beiscsi_offload_cxn_v2(params, pwrb_handle, 4334 pwrb_context); 4335 4336 be_dws_le_to_cpu(pwrb_handle->pwrb, 4337 sizeof(struct iscsi_target_context_update_wrb)); 4338 4339 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4340 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4341 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4342 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4343 iowrite32(doorbell, phba->db_va + 4344 beiscsi_conn->doorbell_offset); 4345 4346 /* 4347 * There is no completion for CONTEXT_UPDATE. The completion of next 4348 * WRB posted guarantees FW's processing and DMA'ing of it. 4349 * Use beiscsi_put_wrb_handle to put it back in the pool which makes 4350 * sure zero'ing or reuse of the WRB only after wrbs_per_cxn. 4351 */ 4352 beiscsi_put_wrb_handle(pwrb_context, pwrb_handle, 4353 phba->params.wrbs_per_cxn); 4354 beiscsi_log(phba, KERN_INFO, 4355 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4356 "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n", 4357 pwrb_handle, pwrb_context->free_index, 4358 pwrb_context->wrb_handles_available); 4359 } 4360 4361 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4362 int *index, int *age) 4363 { 4364 *index = (int)itt; 4365 if (age) 4366 *age = conn->session->age; 4367 } 4368 4369 /** 4370 * beiscsi_alloc_pdu - allocates pdu and related resources 4371 * @task: libiscsi task 4372 * @opcode: opcode of pdu for task 4373 * 4374 * This is called with the session lock held. It will allocate 4375 * the wrb and sgl if needed for the command. And it will prep 4376 * the pdu's itt. beiscsi_parse_pdu will later translate 4377 * the pdu itt to the libiscsi task itt. 4378 */ 4379 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4380 { 4381 struct beiscsi_io_task *io_task = task->dd_data; 4382 struct iscsi_conn *conn = task->conn; 4383 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4384 struct beiscsi_hba *phba = beiscsi_conn->phba; 4385 struct hwi_wrb_context *pwrb_context; 4386 struct hwi_controller *phwi_ctrlr; 4387 itt_t itt; 4388 uint16_t cri_index = 0; 4389 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4390 dma_addr_t paddr; 4391 4392 io_task->cmd_bhs = dma_pool_alloc(beiscsi_sess->bhs_pool, 4393 GFP_ATOMIC, &paddr); 4394 if (!io_task->cmd_bhs) 4395 return -ENOMEM; 4396 io_task->bhs_pa.u.a64.address = paddr; 4397 io_task->libiscsi_itt = (itt_t)task->itt; 4398 io_task->conn = beiscsi_conn; 4399 4400 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4401 task->hdr_max = sizeof(struct be_cmd_bhs); 4402 io_task->psgl_handle = NULL; 4403 io_task->pwrb_handle = NULL; 4404 4405 if (task->sc) { 4406 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4407 if (!io_task->psgl_handle) { 4408 beiscsi_log(phba, KERN_ERR, 4409 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4410 "BM_%d : Alloc of IO_SGL_ICD Failed" 4411 "for the CID : %d\n", 4412 beiscsi_conn->beiscsi_conn_cid); 4413 goto free_hndls; 4414 } 4415 io_task->pwrb_handle = alloc_wrb_handle(phba, 4416 beiscsi_conn->beiscsi_conn_cid, 4417 &io_task->pwrb_context); 4418 if (!io_task->pwrb_handle) { 4419 beiscsi_log(phba, KERN_ERR, 4420 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4421 "BM_%d : Alloc of WRB_HANDLE Failed" 4422 "for the CID : %d\n", 4423 beiscsi_conn->beiscsi_conn_cid); 4424 goto free_io_hndls; 4425 } 4426 } else { 4427 io_task->scsi_cmnd = NULL; 4428 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4429 beiscsi_conn->task = task; 4430 if (!beiscsi_conn->login_in_progress) { 4431 io_task->psgl_handle = (struct sgl_handle *) 4432 alloc_mgmt_sgl_handle(phba); 4433 if (!io_task->psgl_handle) { 4434 beiscsi_log(phba, KERN_ERR, 4435 BEISCSI_LOG_IO | 4436 BEISCSI_LOG_CONFIG, 4437 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4438 "for the CID : %d\n", 4439 beiscsi_conn-> 4440 beiscsi_conn_cid); 4441 goto free_hndls; 4442 } 4443 4444 beiscsi_conn->login_in_progress = 1; 4445 beiscsi_conn->plogin_sgl_handle = 4446 io_task->psgl_handle; 4447 io_task->pwrb_handle = 4448 alloc_wrb_handle(phba, 4449 beiscsi_conn->beiscsi_conn_cid, 4450 &io_task->pwrb_context); 4451 if (!io_task->pwrb_handle) { 4452 beiscsi_log(phba, KERN_ERR, 4453 BEISCSI_LOG_IO | 4454 BEISCSI_LOG_CONFIG, 4455 "BM_%d : Alloc of WRB_HANDLE Failed" 4456 "for the CID : %d\n", 4457 beiscsi_conn-> 4458 beiscsi_conn_cid); 4459 goto free_mgmt_hndls; 4460 } 4461 beiscsi_conn->plogin_wrb_handle = 4462 io_task->pwrb_handle; 4463 4464 } else { 4465 io_task->psgl_handle = 4466 beiscsi_conn->plogin_sgl_handle; 4467 io_task->pwrb_handle = 4468 beiscsi_conn->plogin_wrb_handle; 4469 } 4470 } else { 4471 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4472 if (!io_task->psgl_handle) { 4473 beiscsi_log(phba, KERN_ERR, 4474 BEISCSI_LOG_IO | 4475 BEISCSI_LOG_CONFIG, 4476 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4477 "for the CID : %d\n", 4478 beiscsi_conn-> 4479 beiscsi_conn_cid); 4480 goto free_hndls; 4481 } 4482 io_task->pwrb_handle = 4483 alloc_wrb_handle(phba, 4484 beiscsi_conn->beiscsi_conn_cid, 4485 &io_task->pwrb_context); 4486 if (!io_task->pwrb_handle) { 4487 beiscsi_log(phba, KERN_ERR, 4488 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4489 "BM_%d : Alloc of WRB_HANDLE Failed" 4490 "for the CID : %d\n", 4491 beiscsi_conn->beiscsi_conn_cid); 4492 goto free_mgmt_hndls; 4493 } 4494 4495 } 4496 } 4497 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4498 wrb_index << 16) | (unsigned int) 4499 (io_task->psgl_handle->sgl_index)); 4500 io_task->pwrb_handle->pio_handle = task; 4501 4502 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4503 return 0; 4504 4505 free_io_hndls: 4506 free_io_sgl_handle(phba, io_task->psgl_handle); 4507 goto free_hndls; 4508 free_mgmt_hndls: 4509 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4510 io_task->psgl_handle = NULL; 4511 free_hndls: 4512 phwi_ctrlr = phba->phwi_ctrlr; 4513 cri_index = BE_GET_CRI_FROM_CID( 4514 beiscsi_conn->beiscsi_conn_cid); 4515 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4516 if (io_task->pwrb_handle) 4517 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4518 io_task->pwrb_handle = NULL; 4519 dma_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4520 io_task->bhs_pa.u.a64.address); 4521 io_task->cmd_bhs = NULL; 4522 return -ENOMEM; 4523 } 4524 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4525 unsigned int num_sg, unsigned int xferlen, 4526 unsigned int writedir) 4527 { 4528 4529 struct beiscsi_io_task *io_task = task->dd_data; 4530 struct iscsi_conn *conn = task->conn; 4531 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4532 struct beiscsi_hba *phba = beiscsi_conn->phba; 4533 struct iscsi_wrb *pwrb = NULL; 4534 unsigned int doorbell = 0; 4535 4536 pwrb = io_task->pwrb_handle->pwrb; 4537 4538 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4539 4540 if (writedir) { 4541 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4542 INI_WR_CMD); 4543 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4544 } else { 4545 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4546 INI_RD_CMD); 4547 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4548 } 4549 4550 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4551 type, pwrb); 4552 4553 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4554 cpu_to_be16(*(unsigned short *) 4555 &io_task->cmd_bhs->iscsi_hdr.lun)); 4556 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4557 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4558 io_task->pwrb_handle->wrb_index); 4559 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4560 be32_to_cpu(task->cmdsn)); 4561 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4562 io_task->psgl_handle->sgl_index); 4563 4564 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4565 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4566 io_task->pwrb_handle->wrb_index); 4567 if (io_task->pwrb_context->plast_wrb) 4568 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4569 io_task->pwrb_context->plast_wrb, 4570 io_task->pwrb_handle->wrb_index); 4571 io_task->pwrb_context->plast_wrb = pwrb; 4572 4573 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4574 4575 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4576 doorbell |= (io_task->pwrb_handle->wrb_index & 4577 DB_DEF_PDU_WRB_INDEX_MASK) << 4578 DB_DEF_PDU_WRB_INDEX_SHIFT; 4579 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4580 iowrite32(doorbell, phba->db_va + 4581 beiscsi_conn->doorbell_offset); 4582 return 0; 4583 } 4584 4585 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4586 unsigned int num_sg, unsigned int xferlen, 4587 unsigned int writedir) 4588 { 4589 4590 struct beiscsi_io_task *io_task = task->dd_data; 4591 struct iscsi_conn *conn = task->conn; 4592 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4593 struct beiscsi_hba *phba = beiscsi_conn->phba; 4594 struct iscsi_wrb *pwrb = NULL; 4595 unsigned int doorbell = 0; 4596 4597 pwrb = io_task->pwrb_handle->pwrb; 4598 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4599 4600 if (writedir) { 4601 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4602 INI_WR_CMD); 4603 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4604 } else { 4605 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4606 INI_RD_CMD); 4607 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4608 } 4609 4610 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4611 type, pwrb); 4612 4613 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4614 cpu_to_be16(*(unsigned short *) 4615 &io_task->cmd_bhs->iscsi_hdr.lun)); 4616 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4617 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4618 io_task->pwrb_handle->wrb_index); 4619 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4620 be32_to_cpu(task->cmdsn)); 4621 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4622 io_task->psgl_handle->sgl_index); 4623 4624 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4625 4626 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4627 io_task->pwrb_handle->wrb_index); 4628 if (io_task->pwrb_context->plast_wrb) 4629 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4630 io_task->pwrb_context->plast_wrb, 4631 io_task->pwrb_handle->wrb_index); 4632 io_task->pwrb_context->plast_wrb = pwrb; 4633 4634 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4635 4636 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4637 doorbell |= (io_task->pwrb_handle->wrb_index & 4638 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4639 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4640 4641 iowrite32(doorbell, phba->db_va + 4642 beiscsi_conn->doorbell_offset); 4643 return 0; 4644 } 4645 4646 static int beiscsi_mtask(struct iscsi_task *task) 4647 { 4648 struct beiscsi_io_task *io_task = task->dd_data; 4649 struct iscsi_conn *conn = task->conn; 4650 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4651 struct beiscsi_hba *phba = beiscsi_conn->phba; 4652 struct iscsi_wrb *pwrb = NULL; 4653 unsigned int doorbell = 0; 4654 unsigned int cid; 4655 unsigned int pwrb_typeoffset = 0; 4656 int ret = 0; 4657 4658 cid = beiscsi_conn->beiscsi_conn_cid; 4659 pwrb = io_task->pwrb_handle->pwrb; 4660 4661 if (is_chip_be2_be3r(phba)) { 4662 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4663 be32_to_cpu(task->cmdsn)); 4664 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4665 io_task->pwrb_handle->wrb_index); 4666 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4667 io_task->psgl_handle->sgl_index); 4668 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4669 task->data_count); 4670 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4671 io_task->pwrb_handle->wrb_index); 4672 if (io_task->pwrb_context->plast_wrb) 4673 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, 4674 io_task->pwrb_context->plast_wrb, 4675 io_task->pwrb_handle->wrb_index); 4676 io_task->pwrb_context->plast_wrb = pwrb; 4677 4678 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4679 } else { 4680 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4681 be32_to_cpu(task->cmdsn)); 4682 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4683 io_task->pwrb_handle->wrb_index); 4684 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4685 io_task->psgl_handle->sgl_index); 4686 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 4687 task->data_count); 4688 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4689 io_task->pwrb_handle->wrb_index); 4690 if (io_task->pwrb_context->plast_wrb) 4691 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, 4692 io_task->pwrb_context->plast_wrb, 4693 io_task->pwrb_handle->wrb_index); 4694 io_task->pwrb_context->plast_wrb = pwrb; 4695 4696 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 4697 } 4698 4699 4700 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 4701 case ISCSI_OP_LOGIN: 4702 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 4703 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4704 ret = hwi_write_buffer(pwrb, task); 4705 break; 4706 case ISCSI_OP_NOOP_OUT: 4707 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4708 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4709 if (is_chip_be2_be3r(phba)) 4710 AMAP_SET_BITS(struct amap_iscsi_wrb, 4711 dmsg, pwrb, 1); 4712 else 4713 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4714 dmsg, pwrb, 1); 4715 } else { 4716 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4717 if (is_chip_be2_be3r(phba)) 4718 AMAP_SET_BITS(struct amap_iscsi_wrb, 4719 dmsg, pwrb, 0); 4720 else 4721 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4722 dmsg, pwrb, 0); 4723 } 4724 ret = hwi_write_buffer(pwrb, task); 4725 break; 4726 case ISCSI_OP_TEXT: 4727 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4728 ret = hwi_write_buffer(pwrb, task); 4729 break; 4730 case ISCSI_OP_SCSI_TMFUNC: 4731 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 4732 ret = hwi_write_buffer(pwrb, task); 4733 break; 4734 case ISCSI_OP_LOGOUT: 4735 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 4736 ret = hwi_write_buffer(pwrb, task); 4737 break; 4738 4739 default: 4740 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4741 "BM_%d : opcode =%d Not supported\n", 4742 task->hdr->opcode & ISCSI_OPCODE_MASK); 4743 4744 return -EINVAL; 4745 } 4746 4747 if (ret) 4748 return ret; 4749 4750 /* Set the task type */ 4751 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 4752 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 4753 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 4754 4755 doorbell |= cid & DB_WRB_POST_CID_MASK; 4756 doorbell |= (io_task->pwrb_handle->wrb_index & 4757 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4758 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4759 iowrite32(doorbell, phba->db_va + 4760 beiscsi_conn->doorbell_offset); 4761 return 0; 4762 } 4763 4764 static int beiscsi_task_xmit(struct iscsi_task *task) 4765 { 4766 struct beiscsi_io_task *io_task = task->dd_data; 4767 struct scsi_cmnd *sc = task->sc; 4768 struct beiscsi_hba *phba; 4769 struct scatterlist *sg; 4770 int num_sg; 4771 unsigned int writedir = 0, xferlen = 0; 4772 4773 phba = io_task->conn->phba; 4774 /** 4775 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be 4776 * operational if FW still gets heartbeat from EP FW. Is management 4777 * path really needed to continue further? 4778 */ 4779 if (!beiscsi_hba_is_online(phba)) 4780 return -EIO; 4781 4782 if (!io_task->conn->login_in_progress) 4783 task->hdr->exp_statsn = 0; 4784 4785 if (!sc) 4786 return beiscsi_mtask(task); 4787 4788 io_task->scsi_cmnd = sc; 4789 io_task->num_sg = 0; 4790 num_sg = scsi_dma_map(sc); 4791 if (num_sg < 0) { 4792 beiscsi_log(phba, KERN_ERR, 4793 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 4794 "BM_%d : scsi_dma_map Failed " 4795 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 4796 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 4797 io_task->libiscsi_itt, scsi_bufflen(sc)); 4798 4799 return num_sg; 4800 } 4801 /** 4802 * For scsi cmd task, check num_sg before unmapping in cleanup_task. 4803 * For management task, cleanup_task checks mtask_addr before unmapping. 4804 */ 4805 io_task->num_sg = num_sg; 4806 xferlen = scsi_bufflen(sc); 4807 sg = scsi_sglist(sc); 4808 if (sc->sc_data_direction == DMA_TO_DEVICE) 4809 writedir = 1; 4810 else 4811 writedir = 0; 4812 4813 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 4814 } 4815 4816 /** 4817 * beiscsi_bsg_request - handle bsg request from ISCSI transport 4818 * @job: job to handle 4819 */ 4820 static int beiscsi_bsg_request(struct bsg_job *job) 4821 { 4822 struct Scsi_Host *shost; 4823 struct beiscsi_hba *phba; 4824 struct iscsi_bsg_request *bsg_req = job->request; 4825 int rc = -EINVAL; 4826 unsigned int tag; 4827 struct be_dma_mem nonemb_cmd; 4828 struct be_cmd_resp_hdr *resp; 4829 struct iscsi_bsg_reply *bsg_reply = job->reply; 4830 unsigned short status, extd_status; 4831 4832 shost = iscsi_job_to_shost(job); 4833 phba = iscsi_host_priv(shost); 4834 4835 if (!beiscsi_hba_is_online(phba)) { 4836 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 4837 "BM_%d : HBA in error 0x%lx\n", phba->state); 4838 return -ENXIO; 4839 } 4840 4841 switch (bsg_req->msgcode) { 4842 case ISCSI_BSG_HST_VENDOR: 4843 nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev, 4844 job->request_payload.payload_len, 4845 &nonemb_cmd.dma, GFP_KERNEL); 4846 if (nonemb_cmd.va == NULL) { 4847 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4848 "BM_%d : Failed to allocate memory for " 4849 "beiscsi_bsg_request\n"); 4850 return -ENOMEM; 4851 } 4852 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4853 &nonemb_cmd); 4854 if (!tag) { 4855 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4856 "BM_%d : MBX Tag Allocation Failed\n"); 4857 4858 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, 4859 nonemb_cmd.va, nonemb_cmd.dma); 4860 return -EAGAIN; 4861 } 4862 4863 rc = wait_event_interruptible_timeout( 4864 phba->ctrl.mcc_wait[tag], 4865 phba->ctrl.mcc_tag_status[tag], 4866 msecs_to_jiffies( 4867 BEISCSI_HOST_MBX_TIMEOUT)); 4868 4869 if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 4870 clear_bit(MCC_TAG_STATE_RUNNING, 4871 &phba->ctrl.ptag_state[tag].tag_state); 4872 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, 4873 nonemb_cmd.va, nonemb_cmd.dma); 4874 return -EIO; 4875 } 4876 extd_status = (phba->ctrl.mcc_tag_status[tag] & 4877 CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT; 4878 status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK; 4879 free_mcc_wrb(&phba->ctrl, tag); 4880 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 4881 sg_copy_from_buffer(job->reply_payload.sg_list, 4882 job->reply_payload.sg_cnt, 4883 nonemb_cmd.va, (resp->response_length 4884 + sizeof(*resp))); 4885 bsg_reply->reply_payload_rcv_len = resp->response_length; 4886 bsg_reply->result = status; 4887 bsg_job_done(job, bsg_reply->result, 4888 bsg_reply->reply_payload_rcv_len); 4889 dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size, 4890 nonemb_cmd.va, nonemb_cmd.dma); 4891 if (status || extd_status) { 4892 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4893 "BM_%d : MBX Cmd Failed" 4894 " status = %d extd_status = %d\n", 4895 status, extd_status); 4896 4897 return -EIO; 4898 } else { 4899 rc = 0; 4900 } 4901 break; 4902 4903 default: 4904 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4905 "BM_%d : Unsupported bsg command: 0x%x\n", 4906 bsg_req->msgcode); 4907 break; 4908 } 4909 4910 return rc; 4911 } 4912 4913 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 4914 { 4915 /* Set the logging parameter */ 4916 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4917 } 4918 4919 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle) 4920 { 4921 if (phba->boot_struct.boot_kset) 4922 return; 4923 4924 /* skip if boot work is already in progress */ 4925 if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state)) 4926 return; 4927 4928 phba->boot_struct.retry = 3; 4929 phba->boot_struct.tag = 0; 4930 phba->boot_struct.s_handle = s_handle; 4931 phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE; 4932 schedule_work(&phba->boot_work); 4933 } 4934 4935 /** 4936 * Boot flag info for iscsi-utilities 4937 * Bit 0 Block valid flag 4938 * Bit 1 Firmware booting selected 4939 */ 4940 #define BEISCSI_SYSFS_ISCSI_BOOT_FLAGS 3 4941 4942 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 4943 { 4944 struct beiscsi_hba *phba = data; 4945 struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess; 4946 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 4947 char *str = buf; 4948 int rc = -EPERM; 4949 4950 switch (type) { 4951 case ISCSI_BOOT_TGT_NAME: 4952 rc = sprintf(buf, "%.*s\n", 4953 (int)strlen(boot_sess->target_name), 4954 (char *)&boot_sess->target_name); 4955 break; 4956 case ISCSI_BOOT_TGT_IP_ADDR: 4957 if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4) 4958 rc = sprintf(buf, "%pI4\n", 4959 (char *)&boot_conn->dest_ipaddr.addr); 4960 else 4961 rc = sprintf(str, "%pI6\n", 4962 (char *)&boot_conn->dest_ipaddr.addr); 4963 break; 4964 case ISCSI_BOOT_TGT_PORT: 4965 rc = sprintf(str, "%d\n", boot_conn->dest_port); 4966 break; 4967 4968 case ISCSI_BOOT_TGT_CHAP_NAME: 4969 rc = sprintf(str, "%.*s\n", 4970 boot_conn->negotiated_login_options.auth_data.chap. 4971 target_chap_name_length, 4972 (char *)&boot_conn->negotiated_login_options. 4973 auth_data.chap.target_chap_name); 4974 break; 4975 case ISCSI_BOOT_TGT_CHAP_SECRET: 4976 rc = sprintf(str, "%.*s\n", 4977 boot_conn->negotiated_login_options.auth_data.chap. 4978 target_secret_length, 4979 (char *)&boot_conn->negotiated_login_options. 4980 auth_data.chap.target_secret); 4981 break; 4982 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 4983 rc = sprintf(str, "%.*s\n", 4984 boot_conn->negotiated_login_options.auth_data.chap. 4985 intr_chap_name_length, 4986 (char *)&boot_conn->negotiated_login_options. 4987 auth_data.chap.intr_chap_name); 4988 break; 4989 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 4990 rc = sprintf(str, "%.*s\n", 4991 boot_conn->negotiated_login_options.auth_data.chap. 4992 intr_secret_length, 4993 (char *)&boot_conn->negotiated_login_options. 4994 auth_data.chap.intr_secret); 4995 break; 4996 case ISCSI_BOOT_TGT_FLAGS: 4997 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 4998 break; 4999 case ISCSI_BOOT_TGT_NIC_ASSOC: 5000 rc = sprintf(str, "0\n"); 5001 break; 5002 } 5003 return rc; 5004 } 5005 5006 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 5007 { 5008 struct beiscsi_hba *phba = data; 5009 char *str = buf; 5010 int rc = -EPERM; 5011 5012 switch (type) { 5013 case ISCSI_BOOT_INI_INITIATOR_NAME: 5014 rc = sprintf(str, "%s\n", 5015 phba->boot_struct.boot_sess.initiator_iscsiname); 5016 break; 5017 } 5018 return rc; 5019 } 5020 5021 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 5022 { 5023 struct beiscsi_hba *phba = data; 5024 char *str = buf; 5025 int rc = -EPERM; 5026 5027 switch (type) { 5028 case ISCSI_BOOT_ETH_FLAGS: 5029 rc = sprintf(str, "%d\n", BEISCSI_SYSFS_ISCSI_BOOT_FLAGS); 5030 break; 5031 case ISCSI_BOOT_ETH_INDEX: 5032 rc = sprintf(str, "0\n"); 5033 break; 5034 case ISCSI_BOOT_ETH_MAC: 5035 rc = beiscsi_get_macaddr(str, phba); 5036 break; 5037 } 5038 return rc; 5039 } 5040 5041 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 5042 { 5043 umode_t rc = 0; 5044 5045 switch (type) { 5046 case ISCSI_BOOT_TGT_NAME: 5047 case ISCSI_BOOT_TGT_IP_ADDR: 5048 case ISCSI_BOOT_TGT_PORT: 5049 case ISCSI_BOOT_TGT_CHAP_NAME: 5050 case ISCSI_BOOT_TGT_CHAP_SECRET: 5051 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 5052 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 5053 case ISCSI_BOOT_TGT_NIC_ASSOC: 5054 case ISCSI_BOOT_TGT_FLAGS: 5055 rc = S_IRUGO; 5056 break; 5057 } 5058 return rc; 5059 } 5060 5061 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 5062 { 5063 umode_t rc = 0; 5064 5065 switch (type) { 5066 case ISCSI_BOOT_INI_INITIATOR_NAME: 5067 rc = S_IRUGO; 5068 break; 5069 } 5070 return rc; 5071 } 5072 5073 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 5074 { 5075 umode_t rc = 0; 5076 5077 switch (type) { 5078 case ISCSI_BOOT_ETH_FLAGS: 5079 case ISCSI_BOOT_ETH_MAC: 5080 case ISCSI_BOOT_ETH_INDEX: 5081 rc = S_IRUGO; 5082 break; 5083 } 5084 return rc; 5085 } 5086 5087 static void beiscsi_boot_kobj_release(void *data) 5088 { 5089 struct beiscsi_hba *phba = data; 5090 5091 scsi_host_put(phba->shost); 5092 } 5093 5094 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba) 5095 { 5096 struct boot_struct *bs = &phba->boot_struct; 5097 struct iscsi_boot_kobj *boot_kobj; 5098 5099 if (bs->boot_kset) { 5100 __beiscsi_log(phba, KERN_ERR, 5101 "BM_%d: boot_kset already created\n"); 5102 return 0; 5103 } 5104 5105 bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 5106 if (!bs->boot_kset) { 5107 __beiscsi_log(phba, KERN_ERR, 5108 "BM_%d: boot_kset alloc failed\n"); 5109 return -ENOMEM; 5110 } 5111 5112 /* get shost ref because the show function will refer phba */ 5113 if (!scsi_host_get(phba->shost)) 5114 goto free_kset; 5115 5116 boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba, 5117 beiscsi_show_boot_tgt_info, 5118 beiscsi_tgt_get_attr_visibility, 5119 beiscsi_boot_kobj_release); 5120 if (!boot_kobj) 5121 goto put_shost; 5122 5123 if (!scsi_host_get(phba->shost)) 5124 goto free_kset; 5125 5126 boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba, 5127 beiscsi_show_boot_ini_info, 5128 beiscsi_ini_get_attr_visibility, 5129 beiscsi_boot_kobj_release); 5130 if (!boot_kobj) 5131 goto put_shost; 5132 5133 if (!scsi_host_get(phba->shost)) 5134 goto free_kset; 5135 5136 boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba, 5137 beiscsi_show_boot_eth_info, 5138 beiscsi_eth_get_attr_visibility, 5139 beiscsi_boot_kobj_release); 5140 if (!boot_kobj) 5141 goto put_shost; 5142 5143 return 0; 5144 5145 put_shost: 5146 scsi_host_put(phba->shost); 5147 free_kset: 5148 iscsi_boot_destroy_kset(bs->boot_kset); 5149 bs->boot_kset = NULL; 5150 return -ENOMEM; 5151 } 5152 5153 static void beiscsi_boot_work(struct work_struct *work) 5154 { 5155 struct beiscsi_hba *phba = 5156 container_of(work, struct beiscsi_hba, boot_work); 5157 struct boot_struct *bs = &phba->boot_struct; 5158 unsigned int tag = 0; 5159 5160 if (!beiscsi_hba_is_online(phba)) 5161 return; 5162 5163 beiscsi_log(phba, KERN_INFO, 5164 BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, 5165 "BM_%d : %s action %d\n", 5166 __func__, phba->boot_struct.action); 5167 5168 switch (phba->boot_struct.action) { 5169 case BEISCSI_BOOT_REOPEN_SESS: 5170 tag = beiscsi_boot_reopen_sess(phba); 5171 break; 5172 case BEISCSI_BOOT_GET_SHANDLE: 5173 tag = __beiscsi_boot_get_shandle(phba, 1); 5174 break; 5175 case BEISCSI_BOOT_GET_SINFO: 5176 tag = beiscsi_boot_get_sinfo(phba); 5177 break; 5178 case BEISCSI_BOOT_LOGOUT_SESS: 5179 tag = beiscsi_boot_logout_sess(phba); 5180 break; 5181 case BEISCSI_BOOT_CREATE_KSET: 5182 beiscsi_boot_create_kset(phba); 5183 /** 5184 * updated boot_kset is made visible to all before 5185 * ending the boot work. 5186 */ 5187 mb(); 5188 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5189 return; 5190 } 5191 if (!tag) { 5192 if (bs->retry--) 5193 schedule_work(&phba->boot_work); 5194 else 5195 clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state); 5196 } 5197 } 5198 5199 static void beiscsi_eqd_update_work(struct work_struct *work) 5200 { 5201 struct hwi_context_memory *phwi_context; 5202 struct be_set_eqd set_eqd[MAX_CPUS]; 5203 struct hwi_controller *phwi_ctrlr; 5204 struct be_eq_obj *pbe_eq; 5205 struct beiscsi_hba *phba; 5206 unsigned int pps, delta; 5207 struct be_aic_obj *aic; 5208 int eqd, i, num = 0; 5209 unsigned long now; 5210 5211 phba = container_of(work, struct beiscsi_hba, eqd_update.work); 5212 if (!beiscsi_hba_is_online(phba)) 5213 return; 5214 5215 phwi_ctrlr = phba->phwi_ctrlr; 5216 phwi_context = phwi_ctrlr->phwi_ctxt; 5217 5218 for (i = 0; i <= phba->num_cpus; i++) { 5219 aic = &phba->aic_obj[i]; 5220 pbe_eq = &phwi_context->be_eq[i]; 5221 now = jiffies; 5222 if (!aic->jiffies || time_before(now, aic->jiffies) || 5223 pbe_eq->cq_count < aic->eq_prev) { 5224 aic->jiffies = now; 5225 aic->eq_prev = pbe_eq->cq_count; 5226 continue; 5227 } 5228 delta = jiffies_to_msecs(now - aic->jiffies); 5229 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5230 eqd = (pps / 1500) << 2; 5231 5232 if (eqd < 8) 5233 eqd = 0; 5234 eqd = min_t(u32, eqd, BEISCSI_EQ_DELAY_MAX); 5235 eqd = max_t(u32, eqd, BEISCSI_EQ_DELAY_MIN); 5236 5237 aic->jiffies = now; 5238 aic->eq_prev = pbe_eq->cq_count; 5239 5240 if (eqd != aic->prev_eqd) { 5241 set_eqd[num].delay_multiplier = (eqd * 65)/100; 5242 set_eqd[num].eq_id = pbe_eq->q.id; 5243 aic->prev_eqd = eqd; 5244 num++; 5245 } 5246 } 5247 if (num) 5248 /* completion of this is ignored */ 5249 beiscsi_modify_eq_delay(phba, set_eqd, num); 5250 5251 schedule_delayed_work(&phba->eqd_update, 5252 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5253 } 5254 5255 static void beiscsi_hw_tpe_check(struct timer_list *t) 5256 { 5257 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5258 u32 wait; 5259 5260 /* if not TPE, do nothing */ 5261 if (!beiscsi_detect_tpe(phba)) 5262 return; 5263 5264 /* wait default 4000ms before recovering */ 5265 wait = 4000; 5266 if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL) 5267 wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL; 5268 queue_delayed_work(phba->wq, &phba->recover_port, 5269 msecs_to_jiffies(wait)); 5270 } 5271 5272 static void beiscsi_hw_health_check(struct timer_list *t) 5273 { 5274 struct beiscsi_hba *phba = from_timer(phba, t, hw_check); 5275 5276 beiscsi_detect_ue(phba); 5277 if (beiscsi_detect_ue(phba)) { 5278 __beiscsi_log(phba, KERN_ERR, 5279 "BM_%d : port in error: %lx\n", phba->state); 5280 /* sessions are no longer valid, so first fail the sessions */ 5281 queue_work(phba->wq, &phba->sess_work); 5282 5283 /* detect UER supported */ 5284 if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state)) 5285 return; 5286 /* modify this timer to check TPE */ 5287 phba->hw_check.function = beiscsi_hw_tpe_check; 5288 } 5289 5290 mod_timer(&phba->hw_check, 5291 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5292 } 5293 5294 /* 5295 * beiscsi_enable_port()- Enables the disabled port. 5296 * Only port resources freed in disable function are reallocated. 5297 * This is called in HBA error handling path. 5298 * 5299 * @phba: Instance of driver private structure 5300 * 5301 **/ 5302 static int beiscsi_enable_port(struct beiscsi_hba *phba) 5303 { 5304 struct hwi_context_memory *phwi_context; 5305 struct hwi_controller *phwi_ctrlr; 5306 struct be_eq_obj *pbe_eq; 5307 int ret, i; 5308 5309 if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) { 5310 __beiscsi_log(phba, KERN_ERR, 5311 "BM_%d : %s : port is online %lx\n", 5312 __func__, phba->state); 5313 return 0; 5314 } 5315 5316 ret = beiscsi_init_sliport(phba); 5317 if (ret) 5318 return ret; 5319 5320 be2iscsi_enable_msix(phba); 5321 5322 beiscsi_get_params(phba); 5323 beiscsi_set_host_data(phba); 5324 /* Re-enable UER. If different TPE occurs then it is recoverable. */ 5325 beiscsi_set_uer_feature(phba); 5326 5327 phba->shost->max_id = phba->params.cxns_per_ctrl; 5328 phba->shost->can_queue = phba->params.ios_per_ctrl; 5329 ret = beiscsi_init_port(phba); 5330 if (ret < 0) { 5331 __beiscsi_log(phba, KERN_ERR, 5332 "BM_%d : init port failed\n"); 5333 goto disable_msix; 5334 } 5335 5336 for (i = 0; i < MAX_MCC_CMD; i++) { 5337 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5338 phba->ctrl.mcc_tag[i] = i + 1; 5339 phba->ctrl.mcc_tag_status[i + 1] = 0; 5340 phba->ctrl.mcc_tag_available++; 5341 } 5342 5343 phwi_ctrlr = phba->phwi_ctrlr; 5344 phwi_context = phwi_ctrlr->phwi_ctxt; 5345 for (i = 0; i < phba->num_cpus; i++) { 5346 pbe_eq = &phwi_context->be_eq[i]; 5347 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); 5348 } 5349 5350 i = (phba->pcidev->msix_enabled) ? i : 0; 5351 /* Work item for MCC handling */ 5352 pbe_eq = &phwi_context->be_eq[i]; 5353 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); 5354 5355 ret = beiscsi_init_irqs(phba); 5356 if (ret < 0) { 5357 __beiscsi_log(phba, KERN_ERR, 5358 "BM_%d : setup IRQs failed %d\n", ret); 5359 goto cleanup_port; 5360 } 5361 hwi_enable_intr(phba); 5362 /* port operational: clear all error bits */ 5363 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5364 __beiscsi_log(phba, KERN_INFO, 5365 "BM_%d : port online: 0x%lx\n", phba->state); 5366 5367 /* start hw_check timer and eqd_update work */ 5368 schedule_delayed_work(&phba->eqd_update, 5369 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5370 5371 /** 5372 * Timer function gets modified for TPE detection. 5373 * Always reinit to do health check first. 5374 */ 5375 phba->hw_check.function = beiscsi_hw_health_check; 5376 mod_timer(&phba->hw_check, 5377 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5378 return 0; 5379 5380 cleanup_port: 5381 for (i = 0; i < phba->num_cpus; i++) { 5382 pbe_eq = &phwi_context->be_eq[i]; 5383 irq_poll_disable(&pbe_eq->iopoll); 5384 } 5385 hwi_cleanup_port(phba); 5386 5387 disable_msix: 5388 pci_free_irq_vectors(phba->pcidev); 5389 return ret; 5390 } 5391 5392 /* 5393 * beiscsi_disable_port()- Disable port and cleanup driver resources. 5394 * This is called in HBA error handling and driver removal. 5395 * @phba: Instance Priv structure 5396 * @unload: indicate driver is unloading 5397 * 5398 * Free the OS and HW resources held by the driver 5399 **/ 5400 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload) 5401 { 5402 struct hwi_context_memory *phwi_context; 5403 struct hwi_controller *phwi_ctrlr; 5404 struct be_eq_obj *pbe_eq; 5405 unsigned int i; 5406 5407 if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state)) 5408 return; 5409 5410 phwi_ctrlr = phba->phwi_ctrlr; 5411 phwi_context = phwi_ctrlr->phwi_ctxt; 5412 hwi_disable_intr(phba); 5413 beiscsi_free_irqs(phba); 5414 pci_free_irq_vectors(phba->pcidev); 5415 5416 for (i = 0; i < phba->num_cpus; i++) { 5417 pbe_eq = &phwi_context->be_eq[i]; 5418 irq_poll_disable(&pbe_eq->iopoll); 5419 } 5420 cancel_delayed_work_sync(&phba->eqd_update); 5421 cancel_work_sync(&phba->boot_work); 5422 /* WQ might be running cancel queued mcc_work if we are not exiting */ 5423 if (!unload && beiscsi_hba_in_error(phba)) { 5424 pbe_eq = &phwi_context->be_eq[i]; 5425 cancel_work_sync(&pbe_eq->mcc_work); 5426 } 5427 hwi_cleanup_port(phba); 5428 beiscsi_cleanup_port(phba); 5429 } 5430 5431 static void beiscsi_sess_work(struct work_struct *work) 5432 { 5433 struct beiscsi_hba *phba; 5434 5435 phba = container_of(work, struct beiscsi_hba, sess_work); 5436 /* 5437 * This work gets scheduled only in case of HBA error. 5438 * Old sessions are gone so need to be re-established. 5439 * iscsi_session_failure needs process context hence this work. 5440 */ 5441 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5442 } 5443 5444 static void beiscsi_recover_port(struct work_struct *work) 5445 { 5446 struct beiscsi_hba *phba; 5447 5448 phba = container_of(work, struct beiscsi_hba, recover_port.work); 5449 beiscsi_disable_port(phba, 0); 5450 beiscsi_enable_port(phba); 5451 } 5452 5453 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 5454 pci_channel_state_t state) 5455 { 5456 struct beiscsi_hba *phba = NULL; 5457 5458 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5459 set_bit(BEISCSI_HBA_PCI_ERR, &phba->state); 5460 5461 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5462 "BM_%d : EEH error detected\n"); 5463 5464 /* first stop UE detection when PCI error detected */ 5465 del_timer_sync(&phba->hw_check); 5466 cancel_delayed_work_sync(&phba->recover_port); 5467 5468 /* sessions are no longer valid, so first fail the sessions */ 5469 iscsi_host_for_each_session(phba->shost, beiscsi_session_fail); 5470 beiscsi_disable_port(phba, 0); 5471 5472 if (state == pci_channel_io_perm_failure) { 5473 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5474 "BM_%d : EEH : State PERM Failure"); 5475 return PCI_ERS_RESULT_DISCONNECT; 5476 } 5477 5478 pci_disable_device(pdev); 5479 5480 /* The error could cause the FW to trigger a flash debug dump. 5481 * Resetting the card while flash dump is in progress 5482 * can cause it not to recover; wait for it to finish. 5483 * Wait only for first function as it is needed only once per 5484 * adapter. 5485 **/ 5486 if (pdev->devfn == 0) 5487 ssleep(30); 5488 5489 return PCI_ERS_RESULT_NEED_RESET; 5490 } 5491 5492 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) 5493 { 5494 struct beiscsi_hba *phba = NULL; 5495 int status = 0; 5496 5497 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5498 5499 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5500 "BM_%d : EEH Reset\n"); 5501 5502 status = pci_enable_device(pdev); 5503 if (status) 5504 return PCI_ERS_RESULT_DISCONNECT; 5505 5506 pci_set_master(pdev); 5507 pci_set_power_state(pdev, PCI_D0); 5508 pci_restore_state(pdev); 5509 5510 status = beiscsi_check_fw_rdy(phba); 5511 if (status) { 5512 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5513 "BM_%d : EEH Reset Completed\n"); 5514 } else { 5515 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5516 "BM_%d : EEH Reset Completion Failure\n"); 5517 return PCI_ERS_RESULT_DISCONNECT; 5518 } 5519 5520 return PCI_ERS_RESULT_RECOVERED; 5521 } 5522 5523 static void beiscsi_eeh_resume(struct pci_dev *pdev) 5524 { 5525 struct beiscsi_hba *phba; 5526 int ret; 5527 5528 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5529 pci_save_state(pdev); 5530 5531 ret = beiscsi_enable_port(phba); 5532 if (ret) 5533 __beiscsi_log(phba, KERN_ERR, 5534 "BM_%d : AER EEH resume failed\n"); 5535 } 5536 5537 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5538 const struct pci_device_id *id) 5539 { 5540 struct hwi_context_memory *phwi_context; 5541 struct hwi_controller *phwi_ctrlr; 5542 struct beiscsi_hba *phba = NULL; 5543 struct be_eq_obj *pbe_eq; 5544 unsigned int s_handle; 5545 char wq_name[20]; 5546 int ret, i; 5547 5548 ret = beiscsi_enable_pci(pcidev); 5549 if (ret < 0) { 5550 dev_err(&pcidev->dev, 5551 "beiscsi_dev_probe - Failed to enable pci device\n"); 5552 return ret; 5553 } 5554 5555 phba = beiscsi_hba_alloc(pcidev); 5556 if (!phba) { 5557 dev_err(&pcidev->dev, 5558 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5559 ret = -ENOMEM; 5560 goto disable_pci; 5561 } 5562 5563 /* Enable EEH reporting */ 5564 ret = pci_enable_pcie_error_reporting(pcidev); 5565 if (ret) 5566 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5567 "BM_%d : PCIe Error Reporting " 5568 "Enabling Failed\n"); 5569 5570 pci_save_state(pcidev); 5571 5572 /* Initialize Driver configuration Paramters */ 5573 beiscsi_hba_attrs_init(phba); 5574 5575 phba->mac_addr_set = false; 5576 5577 switch (pcidev->device) { 5578 case BE_DEVICE_ID1: 5579 case OC_DEVICE_ID1: 5580 case OC_DEVICE_ID2: 5581 phba->generation = BE_GEN2; 5582 phba->iotask_fn = beiscsi_iotask; 5583 dev_warn(&pcidev->dev, 5584 "Obsolete/Unsupported BE2 Adapter Family\n"); 5585 break; 5586 case BE_DEVICE_ID2: 5587 case OC_DEVICE_ID3: 5588 phba->generation = BE_GEN3; 5589 phba->iotask_fn = beiscsi_iotask; 5590 break; 5591 case OC_SKH_ID1: 5592 phba->generation = BE_GEN4; 5593 phba->iotask_fn = beiscsi_iotask_v2; 5594 break; 5595 default: 5596 phba->generation = 0; 5597 } 5598 5599 ret = be_ctrl_init(phba, pcidev); 5600 if (ret) { 5601 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5602 "BM_%d : be_ctrl_init failed\n"); 5603 goto free_hba; 5604 } 5605 5606 ret = beiscsi_init_sliport(phba); 5607 if (ret) 5608 goto free_hba; 5609 5610 spin_lock_init(&phba->io_sgl_lock); 5611 spin_lock_init(&phba->mgmt_sgl_lock); 5612 spin_lock_init(&phba->async_pdu_lock); 5613 ret = beiscsi_get_fw_config(&phba->ctrl, phba); 5614 if (ret != 0) { 5615 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5616 "BM_%d : Error getting fw config\n"); 5617 goto free_port; 5618 } 5619 beiscsi_get_port_name(&phba->ctrl, phba); 5620 beiscsi_get_params(phba); 5621 beiscsi_set_host_data(phba); 5622 beiscsi_set_uer_feature(phba); 5623 5624 be2iscsi_enable_msix(phba); 5625 5626 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5627 "BM_%d : num_cpus = %d\n", 5628 phba->num_cpus); 5629 5630 phba->shost->max_id = phba->params.cxns_per_ctrl; 5631 phba->shost->can_queue = phba->params.ios_per_ctrl; 5632 ret = beiscsi_get_memory(phba); 5633 if (ret < 0) { 5634 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5635 "BM_%d : alloc host mem failed\n"); 5636 goto free_port; 5637 } 5638 5639 ret = beiscsi_init_port(phba); 5640 if (ret < 0) { 5641 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5642 "BM_%d : init port failed\n"); 5643 beiscsi_free_mem(phba); 5644 goto free_port; 5645 } 5646 5647 for (i = 0; i < MAX_MCC_CMD; i++) { 5648 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5649 phba->ctrl.mcc_tag[i] = i + 1; 5650 phba->ctrl.mcc_tag_status[i + 1] = 0; 5651 phba->ctrl.mcc_tag_available++; 5652 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, 5653 sizeof(struct be_dma_mem)); 5654 } 5655 5656 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 5657 5658 snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq", 5659 phba->shost->host_no); 5660 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name); 5661 if (!phba->wq) { 5662 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5663 "BM_%d : beiscsi_dev_probe-" 5664 "Failed to allocate work queue\n"); 5665 ret = -ENOMEM; 5666 goto free_twq; 5667 } 5668 5669 INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work); 5670 5671 phwi_ctrlr = phba->phwi_ctrlr; 5672 phwi_context = phwi_ctrlr->phwi_ctxt; 5673 5674 for (i = 0; i < phba->num_cpus; i++) { 5675 pbe_eq = &phwi_context->be_eq[i]; 5676 irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll); 5677 } 5678 5679 i = (phba->pcidev->msix_enabled) ? i : 0; 5680 /* Work item for MCC handling */ 5681 pbe_eq = &phwi_context->be_eq[i]; 5682 INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work); 5683 5684 ret = beiscsi_init_irqs(phba); 5685 if (ret < 0) { 5686 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5687 "BM_%d : beiscsi_dev_probe-" 5688 "Failed to beiscsi_init_irqs\n"); 5689 goto disable_iopoll; 5690 } 5691 hwi_enable_intr(phba); 5692 5693 ret = iscsi_host_add(phba->shost, &phba->pcidev->dev); 5694 if (ret) 5695 goto free_irqs; 5696 5697 /* set online bit after port is operational */ 5698 set_bit(BEISCSI_HBA_ONLINE, &phba->state); 5699 __beiscsi_log(phba, KERN_INFO, 5700 "BM_%d : port online: 0x%lx\n", phba->state); 5701 5702 INIT_WORK(&phba->boot_work, beiscsi_boot_work); 5703 ret = beiscsi_boot_get_shandle(phba, &s_handle); 5704 if (ret > 0) { 5705 beiscsi_start_boot_work(phba, s_handle); 5706 /** 5707 * Set this bit after starting the work to let 5708 * probe handle it first. 5709 * ASYNC event can too schedule this work. 5710 */ 5711 set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state); 5712 } 5713 5714 beiscsi_iface_create_default(phba); 5715 schedule_delayed_work(&phba->eqd_update, 5716 msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); 5717 5718 INIT_WORK(&phba->sess_work, beiscsi_sess_work); 5719 INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port); 5720 /** 5721 * Start UE detection here. UE before this will cause stall in probe 5722 * and eventually fail the probe. 5723 */ 5724 timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0); 5725 mod_timer(&phba->hw_check, 5726 jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); 5727 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5728 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); 5729 return 0; 5730 5731 free_irqs: 5732 hwi_disable_intr(phba); 5733 beiscsi_free_irqs(phba); 5734 disable_iopoll: 5735 for (i = 0; i < phba->num_cpus; i++) { 5736 pbe_eq = &phwi_context->be_eq[i]; 5737 irq_poll_disable(&pbe_eq->iopoll); 5738 } 5739 destroy_workqueue(phba->wq); 5740 free_twq: 5741 hwi_cleanup_port(phba); 5742 beiscsi_cleanup_port(phba); 5743 beiscsi_free_mem(phba); 5744 free_port: 5745 dma_free_coherent(&phba->pcidev->dev, 5746 phba->ctrl.mbox_mem_alloced.size, 5747 phba->ctrl.mbox_mem_alloced.va, 5748 phba->ctrl.mbox_mem_alloced.dma); 5749 beiscsi_unmap_pci_function(phba); 5750 free_hba: 5751 pci_disable_msix(phba->pcidev); 5752 pci_dev_put(phba->pcidev); 5753 iscsi_host_free(phba->shost); 5754 pci_set_drvdata(pcidev, NULL); 5755 disable_pci: 5756 pci_release_regions(pcidev); 5757 pci_disable_device(pcidev); 5758 return ret; 5759 } 5760 5761 static void beiscsi_remove(struct pci_dev *pcidev) 5762 { 5763 struct beiscsi_hba *phba = NULL; 5764 5765 phba = pci_get_drvdata(pcidev); 5766 if (!phba) { 5767 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); 5768 return; 5769 } 5770 5771 /* first stop UE detection before unloading */ 5772 del_timer_sync(&phba->hw_check); 5773 cancel_delayed_work_sync(&phba->recover_port); 5774 cancel_work_sync(&phba->sess_work); 5775 5776 beiscsi_iface_destroy_default(phba); 5777 iscsi_host_remove(phba->shost); 5778 beiscsi_disable_port(phba, 1); 5779 5780 /* after cancelling boot_work */ 5781 iscsi_boot_destroy_kset(phba->boot_struct.boot_kset); 5782 5783 /* free all resources */ 5784 destroy_workqueue(phba->wq); 5785 beiscsi_free_mem(phba); 5786 5787 /* ctrl uninit */ 5788 beiscsi_unmap_pci_function(phba); 5789 dma_free_coherent(&phba->pcidev->dev, 5790 phba->ctrl.mbox_mem_alloced.size, 5791 phba->ctrl.mbox_mem_alloced.va, 5792 phba->ctrl.mbox_mem_alloced.dma); 5793 5794 pci_dev_put(phba->pcidev); 5795 iscsi_host_free(phba->shost); 5796 pci_disable_pcie_error_reporting(pcidev); 5797 pci_set_drvdata(pcidev, NULL); 5798 pci_release_regions(pcidev); 5799 pci_disable_device(pcidev); 5800 } 5801 5802 5803 static struct pci_error_handlers beiscsi_eeh_handlers = { 5804 .error_detected = beiscsi_eeh_err_detected, 5805 .slot_reset = beiscsi_eeh_reset, 5806 .resume = beiscsi_eeh_resume, 5807 }; 5808 5809 struct iscsi_transport beiscsi_iscsi_transport = { 5810 .owner = THIS_MODULE, 5811 .name = DRV_NAME, 5812 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | 5813 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, 5814 .create_session = beiscsi_session_create, 5815 .destroy_session = beiscsi_session_destroy, 5816 .create_conn = beiscsi_conn_create, 5817 .bind_conn = beiscsi_conn_bind, 5818 .destroy_conn = iscsi_conn_teardown, 5819 .attr_is_visible = beiscsi_attr_is_visible, 5820 .set_iface_param = beiscsi_iface_set_param, 5821 .get_iface_param = beiscsi_iface_get_param, 5822 .set_param = beiscsi_set_param, 5823 .get_conn_param = iscsi_conn_get_param, 5824 .get_session_param = iscsi_session_get_param, 5825 .get_host_param = beiscsi_get_host_param, 5826 .start_conn = beiscsi_conn_start, 5827 .stop_conn = iscsi_conn_stop, 5828 .send_pdu = iscsi_conn_send_pdu, 5829 .xmit_task = beiscsi_task_xmit, 5830 .cleanup_task = beiscsi_cleanup_task, 5831 .alloc_pdu = beiscsi_alloc_pdu, 5832 .parse_pdu_itt = beiscsi_parse_pdu, 5833 .get_stats = beiscsi_conn_get_stats, 5834 .get_ep_param = beiscsi_ep_get_param, 5835 .ep_connect = beiscsi_ep_connect, 5836 .ep_poll = beiscsi_ep_poll, 5837 .ep_disconnect = beiscsi_ep_disconnect, 5838 .session_recovery_timedout = iscsi_session_recovery_timedout, 5839 .bsg_request = beiscsi_bsg_request, 5840 }; 5841 5842 static struct pci_driver beiscsi_pci_driver = { 5843 .name = DRV_NAME, 5844 .probe = beiscsi_dev_probe, 5845 .remove = beiscsi_remove, 5846 .id_table = beiscsi_pci_id_table, 5847 .err_handler = &beiscsi_eeh_handlers 5848 }; 5849 5850 static int __init beiscsi_module_init(void) 5851 { 5852 int ret; 5853 5854 beiscsi_scsi_transport = 5855 iscsi_register_transport(&beiscsi_iscsi_transport); 5856 if (!beiscsi_scsi_transport) { 5857 printk(KERN_ERR 5858 "beiscsi_module_init - Unable to register beiscsi transport.\n"); 5859 return -ENOMEM; 5860 } 5861 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n", 5862 &beiscsi_iscsi_transport); 5863 5864 ret = pci_register_driver(&beiscsi_pci_driver); 5865 if (ret) { 5866 printk(KERN_ERR 5867 "beiscsi_module_init - Unable to register beiscsi pci driver.\n"); 5868 goto unregister_iscsi_transport; 5869 } 5870 return 0; 5871 5872 unregister_iscsi_transport: 5873 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5874 return ret; 5875 } 5876 5877 static void __exit beiscsi_module_exit(void) 5878 { 5879 pci_unregister_driver(&beiscsi_pci_driver); 5880 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5881 } 5882 5883 module_init(beiscsi_module_init); 5884 module_exit(beiscsi_module_exit); 5885