1 /** 2 * Copyright (C) 2005 - 2013 Emulex 3 * All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License version 2 7 * as published by the Free Software Foundation. The full GNU General 8 * Public License is included in this distribution in the file called COPYING. 9 * 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com) 11 * 12 * Contact Information: 13 * linux-drivers@emulex.com 14 * 15 * Emulex 16 * 3333 Susan Street 17 * Costa Mesa, CA 92626 18 */ 19 20 #include <linux/reboot.h> 21 #include <linux/delay.h> 22 #include <linux/slab.h> 23 #include <linux/interrupt.h> 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/string.h> 27 #include <linux/kernel.h> 28 #include <linux/semaphore.h> 29 #include <linux/iscsi_boot_sysfs.h> 30 #include <linux/module.h> 31 #include <linux/bsg-lib.h> 32 33 #include <scsi/libiscsi.h> 34 #include <scsi/scsi_bsg_iscsi.h> 35 #include <scsi/scsi_netlink.h> 36 #include <scsi/scsi_transport_iscsi.h> 37 #include <scsi/scsi_transport.h> 38 #include <scsi/scsi_cmnd.h> 39 #include <scsi/scsi_device.h> 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi.h> 42 #include "be_main.h" 43 #include "be_iscsi.h" 44 #include "be_mgmt.h" 45 #include "be_cmds.h" 46 47 static unsigned int be_iopoll_budget = 10; 48 static unsigned int be_max_phys_size = 64; 49 static unsigned int enable_msix = 1; 50 51 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 52 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 53 MODULE_VERSION(BUILD_STR); 54 MODULE_AUTHOR("Emulex Corporation"); 55 MODULE_LICENSE("GPL"); 56 module_param(be_iopoll_budget, int, 0); 57 module_param(enable_msix, int, 0); 58 module_param(be_max_phys_size, uint, S_IRUGO); 59 MODULE_PARM_DESC(be_max_phys_size, 60 "Maximum Size (In Kilobytes) of physically contiguous " 61 "memory that can be allocated. Range is 16 - 128"); 62 63 #define beiscsi_disp_param(_name)\ 64 ssize_t \ 65 beiscsi_##_name##_disp(struct device *dev,\ 66 struct device_attribute *attrib, char *buf) \ 67 { \ 68 struct Scsi_Host *shost = class_to_shost(dev);\ 69 struct beiscsi_hba *phba = iscsi_host_priv(shost); \ 70 uint32_t param_val = 0; \ 71 param_val = phba->attr_##_name;\ 72 return snprintf(buf, PAGE_SIZE, "%d\n",\ 73 phba->attr_##_name);\ 74 } 75 76 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ 77 int \ 78 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ 79 {\ 80 if (val >= _minval && val <= _maxval) {\ 81 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 82 "BA_%d : beiscsi_"#_name" updated "\ 83 "from 0x%x ==> 0x%x\n",\ 84 phba->attr_##_name, val); \ 85 phba->attr_##_name = val;\ 86 return 0;\ 87 } \ 88 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ 89 "BA_%d beiscsi_"#_name" attribute "\ 90 "cannot be updated to 0x%x, "\ 91 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 92 return -EINVAL;\ 93 } 94 95 #define beiscsi_store_param(_name) \ 96 ssize_t \ 97 beiscsi_##_name##_store(struct device *dev,\ 98 struct device_attribute *attr, const char *buf,\ 99 size_t count) \ 100 { \ 101 struct Scsi_Host *shost = class_to_shost(dev);\ 102 struct beiscsi_hba *phba = iscsi_host_priv(shost);\ 103 uint32_t param_val = 0;\ 104 if (!isdigit(buf[0]))\ 105 return -EINVAL;\ 106 if (sscanf(buf, "%i", ¶m_val) != 1)\ 107 return -EINVAL;\ 108 if (beiscsi_##_name##_change(phba, param_val) == 0) \ 109 return strlen(buf);\ 110 else \ 111 return -EINVAL;\ 112 } 113 114 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ 115 int \ 116 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ 117 { \ 118 if (val >= _minval && val <= _maxval) {\ 119 phba->attr_##_name = val;\ 120 return 0;\ 121 } \ 122 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 123 "BA_%d beiscsi_"#_name" attribute " \ 124 "cannot be updated to 0x%x, "\ 125 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 126 phba->attr_##_name = _defval;\ 127 return -EINVAL;\ 128 } 129 130 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ 131 static uint beiscsi_##_name = _defval;\ 132 module_param(beiscsi_##_name, uint, S_IRUGO);\ 133 MODULE_PARM_DESC(beiscsi_##_name, _descp);\ 134 beiscsi_disp_param(_name)\ 135 beiscsi_change_param(_name, _minval, _maxval, _defval)\ 136 beiscsi_store_param(_name)\ 137 beiscsi_init_param(_name, _minval, _maxval, _defval)\ 138 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ 139 beiscsi_##_name##_disp, beiscsi_##_name##_store) 140 141 /* 142 * When new log level added update the 143 * the MAX allowed value for log_enable 144 */ 145 BEISCSI_RW_ATTR(log_enable, 0x00, 146 0xFF, 0x00, "Enable logging Bit Mask\n" 147 "\t\t\t\tInitialization Events : 0x01\n" 148 "\t\t\t\tMailbox Events : 0x02\n" 149 "\t\t\t\tMiscellaneous Events : 0x04\n" 150 "\t\t\t\tError Handling : 0x08\n" 151 "\t\t\t\tIO Path Events : 0x10\n" 152 "\t\t\t\tConfiguration Path : 0x20\n"); 153 154 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 155 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 156 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); 157 DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL); 158 struct device_attribute *beiscsi_attrs[] = { 159 &dev_attr_beiscsi_log_enable, 160 &dev_attr_beiscsi_drvr_ver, 161 &dev_attr_beiscsi_adapter_family, 162 &dev_attr_beiscsi_fw_ver, 163 &dev_attr_beiscsi_active_cid_count, 164 NULL, 165 }; 166 167 static char const *cqe_desc[] = { 168 "RESERVED_DESC", 169 "SOL_CMD_COMPLETE", 170 "SOL_CMD_KILLED_DATA_DIGEST_ERR", 171 "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL", 172 "CXN_KILLED_BURST_LEN_MISMATCH", 173 "CXN_KILLED_AHS_RCVD", 174 "CXN_KILLED_HDR_DIGEST_ERR", 175 "CXN_KILLED_UNKNOWN_HDR", 176 "CXN_KILLED_STALE_ITT_TTT_RCVD", 177 "CXN_KILLED_INVALID_ITT_TTT_RCVD", 178 "CXN_KILLED_RST_RCVD", 179 "CXN_KILLED_TIMED_OUT", 180 "CXN_KILLED_RST_SENT", 181 "CXN_KILLED_FIN_RCVD", 182 "CXN_KILLED_BAD_UNSOL_PDU_RCVD", 183 "CXN_KILLED_BAD_WRB_INDEX_ERROR", 184 "CXN_KILLED_OVER_RUN_RESIDUAL", 185 "CXN_KILLED_UNDER_RUN_RESIDUAL", 186 "CMD_KILLED_INVALID_STATSN_RCVD", 187 "CMD_KILLED_INVALID_R2T_RCVD", 188 "CMD_CXN_KILLED_LUN_INVALID", 189 "CMD_CXN_KILLED_ICD_INVALID", 190 "CMD_CXN_KILLED_ITT_INVALID", 191 "CMD_CXN_KILLED_SEQ_OUTOFORDER", 192 "CMD_CXN_KILLED_INVALID_DATASN_RCVD", 193 "CXN_INVALIDATE_NOTIFY", 194 "CXN_INVALIDATE_INDEX_NOTIFY", 195 "CMD_INVALIDATED_NOTIFY", 196 "UNSOL_HDR_NOTIFY", 197 "UNSOL_DATA_NOTIFY", 198 "UNSOL_DATA_DIGEST_ERROR_NOTIFY", 199 "DRIVERMSG_NOTIFY", 200 "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN", 201 "SOL_CMD_KILLED_DIF_ERR", 202 "CXN_KILLED_SYN_RCVD", 203 "CXN_KILLED_IMM_DATA_RCVD" 204 }; 205 206 static int beiscsi_slave_configure(struct scsi_device *sdev) 207 { 208 blk_queue_max_segment_size(sdev->request_queue, 65536); 209 return 0; 210 } 211 212 static int beiscsi_eh_abort(struct scsi_cmnd *sc) 213 { 214 struct iscsi_cls_session *cls_session; 215 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr; 216 struct beiscsi_io_task *aborted_io_task; 217 struct iscsi_conn *conn; 218 struct beiscsi_conn *beiscsi_conn; 219 struct beiscsi_hba *phba; 220 struct iscsi_session *session; 221 struct invalidate_command_table *inv_tbl; 222 struct be_dma_mem nonemb_cmd; 223 unsigned int cid, tag, num_invalidate; 224 225 cls_session = starget_to_session(scsi_target(sc->device)); 226 session = cls_session->dd_data; 227 228 spin_lock_bh(&session->lock); 229 if (!aborted_task || !aborted_task->sc) { 230 /* we raced */ 231 spin_unlock_bh(&session->lock); 232 return SUCCESS; 233 } 234 235 aborted_io_task = aborted_task->dd_data; 236 if (!aborted_io_task->scsi_cmnd) { 237 /* raced or invalid command */ 238 spin_unlock_bh(&session->lock); 239 return SUCCESS; 240 } 241 spin_unlock_bh(&session->lock); 242 conn = aborted_task->conn; 243 beiscsi_conn = conn->dd_data; 244 phba = beiscsi_conn->phba; 245 246 /* invalidate iocb */ 247 cid = beiscsi_conn->beiscsi_conn_cid; 248 inv_tbl = phba->inv_tbl; 249 memset(inv_tbl, 0x0, sizeof(*inv_tbl)); 250 inv_tbl->cid = cid; 251 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; 252 num_invalidate = 1; 253 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 254 sizeof(struct invalidate_commands_params_in), 255 &nonemb_cmd.dma); 256 if (nonemb_cmd.va == NULL) { 257 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 258 "BM_%d : Failed to allocate memory for" 259 "mgmt_invalidate_icds\n"); 260 return FAILED; 261 } 262 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 263 264 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 265 cid, &nonemb_cmd); 266 if (!tag) { 267 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 268 "BM_%d : mgmt_invalidate_icds could not be" 269 "submitted\n"); 270 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 271 nonemb_cmd.va, nonemb_cmd.dma); 272 273 return FAILED; 274 } 275 276 beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); 277 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 278 nonemb_cmd.va, nonemb_cmd.dma); 279 return iscsi_eh_abort(sc); 280 } 281 282 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) 283 { 284 struct iscsi_task *abrt_task; 285 struct beiscsi_io_task *abrt_io_task; 286 struct iscsi_conn *conn; 287 struct beiscsi_conn *beiscsi_conn; 288 struct beiscsi_hba *phba; 289 struct iscsi_session *session; 290 struct iscsi_cls_session *cls_session; 291 struct invalidate_command_table *inv_tbl; 292 struct be_dma_mem nonemb_cmd; 293 unsigned int cid, tag, i, num_invalidate; 294 295 /* invalidate iocbs */ 296 cls_session = starget_to_session(scsi_target(sc->device)); 297 session = cls_session->dd_data; 298 spin_lock_bh(&session->lock); 299 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { 300 spin_unlock_bh(&session->lock); 301 return FAILED; 302 } 303 conn = session->leadconn; 304 beiscsi_conn = conn->dd_data; 305 phba = beiscsi_conn->phba; 306 cid = beiscsi_conn->beiscsi_conn_cid; 307 inv_tbl = phba->inv_tbl; 308 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN); 309 num_invalidate = 0; 310 for (i = 0; i < conn->session->cmds_max; i++) { 311 abrt_task = conn->session->cmds[i]; 312 abrt_io_task = abrt_task->dd_data; 313 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) 314 continue; 315 316 if (abrt_task->sc->device->lun != abrt_task->sc->device->lun) 317 continue; 318 319 inv_tbl->cid = cid; 320 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; 321 num_invalidate++; 322 inv_tbl++; 323 } 324 spin_unlock_bh(&session->lock); 325 inv_tbl = phba->inv_tbl; 326 327 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 328 sizeof(struct invalidate_commands_params_in), 329 &nonemb_cmd.dma); 330 if (nonemb_cmd.va == NULL) { 331 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 332 "BM_%d : Failed to allocate memory for" 333 "mgmt_invalidate_icds\n"); 334 return FAILED; 335 } 336 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 337 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 338 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 339 cid, &nonemb_cmd); 340 if (!tag) { 341 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 342 "BM_%d : mgmt_invalidate_icds could not be" 343 " submitted\n"); 344 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 345 nonemb_cmd.va, nonemb_cmd.dma); 346 return FAILED; 347 } 348 349 beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); 350 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 351 nonemb_cmd.va, nonemb_cmd.dma); 352 return iscsi_eh_device_reset(sc); 353 } 354 355 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 356 { 357 struct beiscsi_hba *phba = data; 358 struct mgmt_session_info *boot_sess = &phba->boot_sess; 359 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 360 char *str = buf; 361 int rc; 362 363 switch (type) { 364 case ISCSI_BOOT_TGT_NAME: 365 rc = sprintf(buf, "%.*s\n", 366 (int)strlen(boot_sess->target_name), 367 (char *)&boot_sess->target_name); 368 break; 369 case ISCSI_BOOT_TGT_IP_ADDR: 370 if (boot_conn->dest_ipaddr.ip_type == 0x1) 371 rc = sprintf(buf, "%pI4\n", 372 (char *)&boot_conn->dest_ipaddr.addr); 373 else 374 rc = sprintf(str, "%pI6\n", 375 (char *)&boot_conn->dest_ipaddr.addr); 376 break; 377 case ISCSI_BOOT_TGT_PORT: 378 rc = sprintf(str, "%d\n", boot_conn->dest_port); 379 break; 380 381 case ISCSI_BOOT_TGT_CHAP_NAME: 382 rc = sprintf(str, "%.*s\n", 383 boot_conn->negotiated_login_options.auth_data.chap. 384 target_chap_name_length, 385 (char *)&boot_conn->negotiated_login_options. 386 auth_data.chap.target_chap_name); 387 break; 388 case ISCSI_BOOT_TGT_CHAP_SECRET: 389 rc = sprintf(str, "%.*s\n", 390 boot_conn->negotiated_login_options.auth_data.chap. 391 target_secret_length, 392 (char *)&boot_conn->negotiated_login_options. 393 auth_data.chap.target_secret); 394 break; 395 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 396 rc = sprintf(str, "%.*s\n", 397 boot_conn->negotiated_login_options.auth_data.chap. 398 intr_chap_name_length, 399 (char *)&boot_conn->negotiated_login_options. 400 auth_data.chap.intr_chap_name); 401 break; 402 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 403 rc = sprintf(str, "%.*s\n", 404 boot_conn->negotiated_login_options.auth_data.chap. 405 intr_secret_length, 406 (char *)&boot_conn->negotiated_login_options. 407 auth_data.chap.intr_secret); 408 break; 409 case ISCSI_BOOT_TGT_FLAGS: 410 rc = sprintf(str, "2\n"); 411 break; 412 case ISCSI_BOOT_TGT_NIC_ASSOC: 413 rc = sprintf(str, "0\n"); 414 break; 415 default: 416 rc = -ENOSYS; 417 break; 418 } 419 return rc; 420 } 421 422 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 423 { 424 struct beiscsi_hba *phba = data; 425 char *str = buf; 426 int rc; 427 428 switch (type) { 429 case ISCSI_BOOT_INI_INITIATOR_NAME: 430 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname); 431 break; 432 default: 433 rc = -ENOSYS; 434 break; 435 } 436 return rc; 437 } 438 439 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 440 { 441 struct beiscsi_hba *phba = data; 442 char *str = buf; 443 int rc; 444 445 switch (type) { 446 case ISCSI_BOOT_ETH_FLAGS: 447 rc = sprintf(str, "2\n"); 448 break; 449 case ISCSI_BOOT_ETH_INDEX: 450 rc = sprintf(str, "0\n"); 451 break; 452 case ISCSI_BOOT_ETH_MAC: 453 rc = beiscsi_get_macaddr(str, phba); 454 break; 455 default: 456 rc = -ENOSYS; 457 break; 458 } 459 return rc; 460 } 461 462 463 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 464 { 465 umode_t rc; 466 467 switch (type) { 468 case ISCSI_BOOT_TGT_NAME: 469 case ISCSI_BOOT_TGT_IP_ADDR: 470 case ISCSI_BOOT_TGT_PORT: 471 case ISCSI_BOOT_TGT_CHAP_NAME: 472 case ISCSI_BOOT_TGT_CHAP_SECRET: 473 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 474 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 475 case ISCSI_BOOT_TGT_NIC_ASSOC: 476 case ISCSI_BOOT_TGT_FLAGS: 477 rc = S_IRUGO; 478 break; 479 default: 480 rc = 0; 481 break; 482 } 483 return rc; 484 } 485 486 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 487 { 488 umode_t rc; 489 490 switch (type) { 491 case ISCSI_BOOT_INI_INITIATOR_NAME: 492 rc = S_IRUGO; 493 break; 494 default: 495 rc = 0; 496 break; 497 } 498 return rc; 499 } 500 501 502 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 503 { 504 umode_t rc; 505 506 switch (type) { 507 case ISCSI_BOOT_ETH_FLAGS: 508 case ISCSI_BOOT_ETH_MAC: 509 case ISCSI_BOOT_ETH_INDEX: 510 rc = S_IRUGO; 511 break; 512 default: 513 rc = 0; 514 break; 515 } 516 return rc; 517 } 518 519 /*------------------- PCI Driver operations and data ----------------- */ 520 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = { 521 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 522 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 523 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 524 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 525 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, 526 { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) }, 527 { 0 } 528 }; 529 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 530 531 532 static struct scsi_host_template beiscsi_sht = { 533 .module = THIS_MODULE, 534 .name = "Emulex 10Gbe open-iscsi Initiator Driver", 535 .proc_name = DRV_NAME, 536 .queuecommand = iscsi_queuecommand, 537 .change_queue_depth = iscsi_change_queue_depth, 538 .slave_configure = beiscsi_slave_configure, 539 .target_alloc = iscsi_target_alloc, 540 .eh_abort_handler = beiscsi_eh_abort, 541 .eh_device_reset_handler = beiscsi_eh_device_reset, 542 .eh_target_reset_handler = iscsi_eh_session_reset, 543 .shost_attrs = beiscsi_attrs, 544 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 545 .can_queue = BE2_IO_DEPTH, 546 .this_id = -1, 547 .max_sectors = BEISCSI_MAX_SECTORS, 548 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 549 .use_clustering = ENABLE_CLUSTERING, 550 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, 551 552 }; 553 554 static struct scsi_transport_template *beiscsi_scsi_transport; 555 556 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) 557 { 558 struct beiscsi_hba *phba; 559 struct Scsi_Host *shost; 560 561 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); 562 if (!shost) { 563 dev_err(&pcidev->dev, 564 "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); 565 return NULL; 566 } 567 shost->dma_boundary = pcidev->dma_mask; 568 shost->max_id = BE2_MAX_SESSIONS; 569 shost->max_channel = 0; 570 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; 571 shost->max_lun = BEISCSI_NUM_MAX_LUN; 572 shost->transportt = beiscsi_scsi_transport; 573 phba = iscsi_host_priv(shost); 574 memset(phba, 0, sizeof(*phba)); 575 phba->shost = shost; 576 phba->pcidev = pci_dev_get(pcidev); 577 pci_set_drvdata(pcidev, phba); 578 phba->interface_handle = 0xFFFFFFFF; 579 580 if (iscsi_host_add(shost, &phba->pcidev->dev)) 581 goto free_devices; 582 583 return phba; 584 585 free_devices: 586 pci_dev_put(phba->pcidev); 587 iscsi_host_free(phba->shost); 588 return NULL; 589 } 590 591 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) 592 { 593 if (phba->csr_va) { 594 iounmap(phba->csr_va); 595 phba->csr_va = NULL; 596 } 597 if (phba->db_va) { 598 iounmap(phba->db_va); 599 phba->db_va = NULL; 600 } 601 if (phba->pci_va) { 602 iounmap(phba->pci_va); 603 phba->pci_va = NULL; 604 } 605 } 606 607 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, 608 struct pci_dev *pcidev) 609 { 610 u8 __iomem *addr; 611 int pcicfg_reg; 612 613 addr = ioremap_nocache(pci_resource_start(pcidev, 2), 614 pci_resource_len(pcidev, 2)); 615 if (addr == NULL) 616 return -ENOMEM; 617 phba->ctrl.csr = addr; 618 phba->csr_va = addr; 619 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2); 620 621 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); 622 if (addr == NULL) 623 goto pci_map_err; 624 phba->ctrl.db = addr; 625 phba->db_va = addr; 626 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4); 627 628 if (phba->generation == BE_GEN2) 629 pcicfg_reg = 1; 630 else 631 pcicfg_reg = 0; 632 633 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), 634 pci_resource_len(pcidev, pcicfg_reg)); 635 636 if (addr == NULL) 637 goto pci_map_err; 638 phba->ctrl.pcicfg = addr; 639 phba->pci_va = addr; 640 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg); 641 return 0; 642 643 pci_map_err: 644 beiscsi_unmap_pci_function(phba); 645 return -ENOMEM; 646 } 647 648 static int beiscsi_enable_pci(struct pci_dev *pcidev) 649 { 650 int ret; 651 652 ret = pci_enable_device(pcidev); 653 if (ret) { 654 dev_err(&pcidev->dev, 655 "beiscsi_enable_pci - enable device failed\n"); 656 return ret; 657 } 658 659 pci_set_master(pcidev); 660 if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { 661 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); 662 if (ret) { 663 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 664 pci_disable_device(pcidev); 665 return ret; 666 } 667 } 668 return 0; 669 } 670 671 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) 672 { 673 struct be_ctrl_info *ctrl = &phba->ctrl; 674 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; 675 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; 676 int status = 0; 677 678 ctrl->pdev = pdev; 679 status = beiscsi_map_pci_bars(phba, pdev); 680 if (status) 681 return status; 682 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 683 mbox_mem_alloc->va = pci_alloc_consistent(pdev, 684 mbox_mem_alloc->size, 685 &mbox_mem_alloc->dma); 686 if (!mbox_mem_alloc->va) { 687 beiscsi_unmap_pci_function(phba); 688 return -ENOMEM; 689 } 690 691 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 692 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 693 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 694 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 695 spin_lock_init(&ctrl->mbox_lock); 696 spin_lock_init(&phba->ctrl.mcc_lock); 697 spin_lock_init(&phba->ctrl.mcc_cq_lock); 698 699 return status; 700 } 701 702 static void beiscsi_get_params(struct beiscsi_hba *phba) 703 { 704 phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count 705 - (phba->fw_config.iscsi_cid_count 706 + BE2_TMFS 707 + BE2_NOPOUT_REQ)); 708 phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; 709 phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count; 710 phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; 711 phba->params.num_sge_per_io = BE2_SGE; 712 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 713 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 714 phba->params.eq_timer = 64; 715 phba->params.num_eq_entries = 716 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 717 + BE2_TMFS) / 512) + 1) * 512; 718 phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024) 719 ? 1024 : phba->params.num_eq_entries; 720 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 721 "BM_%d : phba->params.num_eq_entries=%d\n", 722 phba->params.num_eq_entries); 723 phba->params.num_cq_entries = 724 (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2 725 + BE2_TMFS) / 512) + 1) * 512; 726 phba->params.wrbs_per_cxn = 256; 727 } 728 729 static void hwi_ring_eq_db(struct beiscsi_hba *phba, 730 unsigned int id, unsigned int clr_interrupt, 731 unsigned int num_processed, 732 unsigned char rearm, unsigned char event) 733 { 734 u32 val = 0; 735 val |= id & DB_EQ_RING_ID_MASK; 736 if (rearm) 737 val |= 1 << DB_EQ_REARM_SHIFT; 738 if (clr_interrupt) 739 val |= 1 << DB_EQ_CLR_SHIFT; 740 if (event) 741 val |= 1 << DB_EQ_EVNT_SHIFT; 742 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; 743 iowrite32(val, phba->db_va + DB_EQ_OFFSET); 744 } 745 746 /** 747 * be_isr_mcc - The isr routine of the driver. 748 * @irq: Not used 749 * @dev_id: Pointer to host adapter structure 750 */ 751 static irqreturn_t be_isr_mcc(int irq, void *dev_id) 752 { 753 struct beiscsi_hba *phba; 754 struct be_eq_entry *eqe = NULL; 755 struct be_queue_info *eq; 756 struct be_queue_info *mcc; 757 unsigned int num_eq_processed; 758 struct be_eq_obj *pbe_eq; 759 unsigned long flags; 760 761 pbe_eq = dev_id; 762 eq = &pbe_eq->q; 763 phba = pbe_eq->phba; 764 mcc = &phba->ctrl.mcc_obj.cq; 765 eqe = queue_tail_node(eq); 766 767 num_eq_processed = 0; 768 769 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 770 & EQE_VALID_MASK) { 771 if (((eqe->dw[offsetof(struct amap_eq_entry, 772 resource_id) / 32] & 773 EQE_RESID_MASK) >> 16) == mcc->id) { 774 spin_lock_irqsave(&phba->isr_lock, flags); 775 pbe_eq->todo_mcc_cq = true; 776 spin_unlock_irqrestore(&phba->isr_lock, flags); 777 } 778 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 779 queue_tail_inc(eq); 780 eqe = queue_tail_node(eq); 781 num_eq_processed++; 782 } 783 if (pbe_eq->todo_mcc_cq) 784 queue_work(phba->wq, &pbe_eq->work_cqs); 785 if (num_eq_processed) 786 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); 787 788 return IRQ_HANDLED; 789 } 790 791 /** 792 * be_isr_msix - The isr routine of the driver. 793 * @irq: Not used 794 * @dev_id: Pointer to host adapter structure 795 */ 796 static irqreturn_t be_isr_msix(int irq, void *dev_id) 797 { 798 struct beiscsi_hba *phba; 799 struct be_eq_entry *eqe = NULL; 800 struct be_queue_info *eq; 801 struct be_queue_info *cq; 802 unsigned int num_eq_processed; 803 struct be_eq_obj *pbe_eq; 804 unsigned long flags; 805 806 pbe_eq = dev_id; 807 eq = &pbe_eq->q; 808 cq = pbe_eq->cq; 809 eqe = queue_tail_node(eq); 810 811 phba = pbe_eq->phba; 812 num_eq_processed = 0; 813 if (blk_iopoll_enabled) { 814 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 815 & EQE_VALID_MASK) { 816 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) 817 blk_iopoll_sched(&pbe_eq->iopoll); 818 819 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 820 queue_tail_inc(eq); 821 eqe = queue_tail_node(eq); 822 num_eq_processed++; 823 } 824 } else { 825 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 826 & EQE_VALID_MASK) { 827 spin_lock_irqsave(&phba->isr_lock, flags); 828 pbe_eq->todo_cq = true; 829 spin_unlock_irqrestore(&phba->isr_lock, flags); 830 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 831 queue_tail_inc(eq); 832 eqe = queue_tail_node(eq); 833 num_eq_processed++; 834 } 835 836 if (pbe_eq->todo_cq) 837 queue_work(phba->wq, &pbe_eq->work_cqs); 838 } 839 840 if (num_eq_processed) 841 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); 842 843 return IRQ_HANDLED; 844 } 845 846 /** 847 * be_isr - The isr routine of the driver. 848 * @irq: Not used 849 * @dev_id: Pointer to host adapter structure 850 */ 851 static irqreturn_t be_isr(int irq, void *dev_id) 852 { 853 struct beiscsi_hba *phba; 854 struct hwi_controller *phwi_ctrlr; 855 struct hwi_context_memory *phwi_context; 856 struct be_eq_entry *eqe = NULL; 857 struct be_queue_info *eq; 858 struct be_queue_info *cq; 859 struct be_queue_info *mcc; 860 unsigned long flags, index; 861 unsigned int num_mcceq_processed, num_ioeq_processed; 862 struct be_ctrl_info *ctrl; 863 struct be_eq_obj *pbe_eq; 864 int isr; 865 866 phba = dev_id; 867 ctrl = &phba->ctrl; 868 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + 869 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); 870 if (!isr) 871 return IRQ_NONE; 872 873 phwi_ctrlr = phba->phwi_ctrlr; 874 phwi_context = phwi_ctrlr->phwi_ctxt; 875 pbe_eq = &phwi_context->be_eq[0]; 876 877 eq = &phwi_context->be_eq[0].q; 878 mcc = &phba->ctrl.mcc_obj.cq; 879 index = 0; 880 eqe = queue_tail_node(eq); 881 882 num_ioeq_processed = 0; 883 num_mcceq_processed = 0; 884 if (blk_iopoll_enabled) { 885 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 886 & EQE_VALID_MASK) { 887 if (((eqe->dw[offsetof(struct amap_eq_entry, 888 resource_id) / 32] & 889 EQE_RESID_MASK) >> 16) == mcc->id) { 890 spin_lock_irqsave(&phba->isr_lock, flags); 891 pbe_eq->todo_mcc_cq = true; 892 spin_unlock_irqrestore(&phba->isr_lock, flags); 893 num_mcceq_processed++; 894 } else { 895 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) 896 blk_iopoll_sched(&pbe_eq->iopoll); 897 num_ioeq_processed++; 898 } 899 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 900 queue_tail_inc(eq); 901 eqe = queue_tail_node(eq); 902 } 903 if (num_ioeq_processed || num_mcceq_processed) { 904 if (pbe_eq->todo_mcc_cq) 905 queue_work(phba->wq, &pbe_eq->work_cqs); 906 907 if ((num_mcceq_processed) && (!num_ioeq_processed)) 908 hwi_ring_eq_db(phba, eq->id, 0, 909 (num_ioeq_processed + 910 num_mcceq_processed) , 1, 1); 911 else 912 hwi_ring_eq_db(phba, eq->id, 0, 913 (num_ioeq_processed + 914 num_mcceq_processed), 0, 1); 915 916 return IRQ_HANDLED; 917 } else 918 return IRQ_NONE; 919 } else { 920 cq = &phwi_context->be_cq[0]; 921 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 922 & EQE_VALID_MASK) { 923 924 if (((eqe->dw[offsetof(struct amap_eq_entry, 925 resource_id) / 32] & 926 EQE_RESID_MASK) >> 16) != cq->id) { 927 spin_lock_irqsave(&phba->isr_lock, flags); 928 pbe_eq->todo_mcc_cq = true; 929 spin_unlock_irqrestore(&phba->isr_lock, flags); 930 } else { 931 spin_lock_irqsave(&phba->isr_lock, flags); 932 pbe_eq->todo_cq = true; 933 spin_unlock_irqrestore(&phba->isr_lock, flags); 934 } 935 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 936 queue_tail_inc(eq); 937 eqe = queue_tail_node(eq); 938 num_ioeq_processed++; 939 } 940 if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq) 941 queue_work(phba->wq, &pbe_eq->work_cqs); 942 943 if (num_ioeq_processed) { 944 hwi_ring_eq_db(phba, eq->id, 0, 945 num_ioeq_processed, 1, 1); 946 return IRQ_HANDLED; 947 } else 948 return IRQ_NONE; 949 } 950 } 951 952 static int beiscsi_init_irqs(struct beiscsi_hba *phba) 953 { 954 struct pci_dev *pcidev = phba->pcidev; 955 struct hwi_controller *phwi_ctrlr; 956 struct hwi_context_memory *phwi_context; 957 int ret, msix_vec, i, j; 958 959 phwi_ctrlr = phba->phwi_ctrlr; 960 phwi_context = phwi_ctrlr->phwi_ctxt; 961 962 if (phba->msix_enabled) { 963 for (i = 0; i < phba->num_cpus; i++) { 964 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, 965 GFP_KERNEL); 966 if (!phba->msi_name[i]) { 967 ret = -ENOMEM; 968 goto free_msix_irqs; 969 } 970 971 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x", 972 phba->shost->host_no, i); 973 msix_vec = phba->msix_entries[i].vector; 974 ret = request_irq(msix_vec, be_isr_msix, 0, 975 phba->msi_name[i], 976 &phwi_context->be_eq[i]); 977 if (ret) { 978 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 979 "BM_%d : beiscsi_init_irqs-Failed to" 980 "register msix for i = %d\n", 981 i); 982 kfree(phba->msi_name[i]); 983 goto free_msix_irqs; 984 } 985 } 986 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL); 987 if (!phba->msi_name[i]) { 988 ret = -ENOMEM; 989 goto free_msix_irqs; 990 } 991 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x", 992 phba->shost->host_no); 993 msix_vec = phba->msix_entries[i].vector; 994 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i], 995 &phwi_context->be_eq[i]); 996 if (ret) { 997 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT , 998 "BM_%d : beiscsi_init_irqs-" 999 "Failed to register beiscsi_msix_mcc\n"); 1000 kfree(phba->msi_name[i]); 1001 goto free_msix_irqs; 1002 } 1003 1004 } else { 1005 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, 1006 "beiscsi", phba); 1007 if (ret) { 1008 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 1009 "BM_%d : beiscsi_init_irqs-" 1010 "Failed to register irq\\n"); 1011 return ret; 1012 } 1013 } 1014 return 0; 1015 free_msix_irqs: 1016 for (j = i - 1; j >= 0; j--) { 1017 kfree(phba->msi_name[j]); 1018 msix_vec = phba->msix_entries[j].vector; 1019 free_irq(msix_vec, &phwi_context->be_eq[j]); 1020 } 1021 return ret; 1022 } 1023 1024 static void hwi_ring_cq_db(struct beiscsi_hba *phba, 1025 unsigned int id, unsigned int num_processed, 1026 unsigned char rearm, unsigned char event) 1027 { 1028 u32 val = 0; 1029 val |= id & DB_CQ_RING_ID_MASK; 1030 if (rearm) 1031 val |= 1 << DB_CQ_REARM_SHIFT; 1032 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; 1033 iowrite32(val, phba->db_va + DB_CQ_OFFSET); 1034 } 1035 1036 static unsigned int 1037 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, 1038 struct beiscsi_hba *phba, 1039 struct pdu_base *ppdu, 1040 unsigned long pdu_len, 1041 void *pbuffer, unsigned long buf_len) 1042 { 1043 struct iscsi_conn *conn = beiscsi_conn->conn; 1044 struct iscsi_session *session = conn->session; 1045 struct iscsi_task *task; 1046 struct beiscsi_io_task *io_task; 1047 struct iscsi_hdr *login_hdr; 1048 1049 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] & 1050 PDUBASE_OPCODE_MASK) { 1051 case ISCSI_OP_NOOP_IN: 1052 pbuffer = NULL; 1053 buf_len = 0; 1054 break; 1055 case ISCSI_OP_ASYNC_EVENT: 1056 break; 1057 case ISCSI_OP_REJECT: 1058 WARN_ON(!pbuffer); 1059 WARN_ON(!(buf_len == 48)); 1060 beiscsi_log(phba, KERN_ERR, 1061 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1062 "BM_%d : In ISCSI_OP_REJECT\n"); 1063 break; 1064 case ISCSI_OP_LOGIN_RSP: 1065 case ISCSI_OP_TEXT_RSP: 1066 task = conn->login_task; 1067 io_task = task->dd_data; 1068 login_hdr = (struct iscsi_hdr *)ppdu; 1069 login_hdr->itt = io_task->libiscsi_itt; 1070 break; 1071 default: 1072 beiscsi_log(phba, KERN_WARNING, 1073 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1074 "BM_%d : Unrecognized opcode 0x%x in async msg\n", 1075 (ppdu-> 1076 dw[offsetof(struct amap_pdu_base, opcode) / 32] 1077 & PDUBASE_OPCODE_MASK)); 1078 return 1; 1079 } 1080 1081 spin_lock_bh(&session->lock); 1082 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len); 1083 spin_unlock_bh(&session->lock); 1084 return 0; 1085 } 1086 1087 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) 1088 { 1089 struct sgl_handle *psgl_handle; 1090 1091 if (phba->io_sgl_hndl_avbl) { 1092 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1093 "BM_%d : In alloc_io_sgl_handle," 1094 " io_sgl_alloc_index=%d\n", 1095 phba->io_sgl_alloc_index); 1096 1097 psgl_handle = phba->io_sgl_hndl_base[phba-> 1098 io_sgl_alloc_index]; 1099 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; 1100 phba->io_sgl_hndl_avbl--; 1101 if (phba->io_sgl_alloc_index == (phba->params. 1102 ios_per_ctrl - 1)) 1103 phba->io_sgl_alloc_index = 0; 1104 else 1105 phba->io_sgl_alloc_index++; 1106 } else 1107 psgl_handle = NULL; 1108 return psgl_handle; 1109 } 1110 1111 static void 1112 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1113 { 1114 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1115 "BM_%d : In free_,io_sgl_free_index=%d\n", 1116 phba->io_sgl_free_index); 1117 1118 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { 1119 /* 1120 * this can happen if clean_task is called on a task that 1121 * failed in xmit_task or alloc_pdu. 1122 */ 1123 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1124 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d," 1125 "value there=%p\n", phba->io_sgl_free_index, 1126 phba->io_sgl_hndl_base 1127 [phba->io_sgl_free_index]); 1128 return; 1129 } 1130 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; 1131 phba->io_sgl_hndl_avbl++; 1132 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1)) 1133 phba->io_sgl_free_index = 0; 1134 else 1135 phba->io_sgl_free_index++; 1136 } 1137 1138 /** 1139 * alloc_wrb_handle - To allocate a wrb handle 1140 * @phba: The hba pointer 1141 * @cid: The cid to use for allocation 1142 * 1143 * This happens under session_lock until submission to chip 1144 */ 1145 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) 1146 { 1147 struct hwi_wrb_context *pwrb_context; 1148 struct hwi_controller *phwi_ctrlr; 1149 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; 1150 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); 1151 1152 phwi_ctrlr = phba->phwi_ctrlr; 1153 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1154 if (pwrb_context->wrb_handles_available >= 2) { 1155 pwrb_handle = pwrb_context->pwrb_handle_base[ 1156 pwrb_context->alloc_index]; 1157 pwrb_context->wrb_handles_available--; 1158 if (pwrb_context->alloc_index == 1159 (phba->params.wrbs_per_cxn - 1)) 1160 pwrb_context->alloc_index = 0; 1161 else 1162 pwrb_context->alloc_index++; 1163 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[ 1164 pwrb_context->alloc_index]; 1165 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index; 1166 } else 1167 pwrb_handle = NULL; 1168 return pwrb_handle; 1169 } 1170 1171 /** 1172 * free_wrb_handle - To free the wrb handle back to pool 1173 * @phba: The hba pointer 1174 * @pwrb_context: The context to free from 1175 * @pwrb_handle: The wrb_handle to free 1176 * 1177 * This happens under session_lock until submission to chip 1178 */ 1179 static void 1180 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, 1181 struct wrb_handle *pwrb_handle) 1182 { 1183 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; 1184 pwrb_context->wrb_handles_available++; 1185 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) 1186 pwrb_context->free_index = 0; 1187 else 1188 pwrb_context->free_index++; 1189 1190 beiscsi_log(phba, KERN_INFO, 1191 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1192 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x" 1193 "wrb_handles_available=%d\n", 1194 pwrb_handle, pwrb_context->free_index, 1195 pwrb_context->wrb_handles_available); 1196 } 1197 1198 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 1199 { 1200 struct sgl_handle *psgl_handle; 1201 1202 if (phba->eh_sgl_hndl_avbl) { 1203 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 1204 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; 1205 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1206 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n", 1207 phba->eh_sgl_alloc_index, 1208 phba->eh_sgl_alloc_index); 1209 1210 phba->eh_sgl_hndl_avbl--; 1211 if (phba->eh_sgl_alloc_index == 1212 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1213 1)) 1214 phba->eh_sgl_alloc_index = 0; 1215 else 1216 phba->eh_sgl_alloc_index++; 1217 } else 1218 psgl_handle = NULL; 1219 return psgl_handle; 1220 } 1221 1222 void 1223 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1224 { 1225 1226 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1227 "BM_%d : In free_mgmt_sgl_handle," 1228 "eh_sgl_free_index=%d\n", 1229 phba->eh_sgl_free_index); 1230 1231 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 1232 /* 1233 * this can happen if clean_task is called on a task that 1234 * failed in xmit_task or alloc_pdu. 1235 */ 1236 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1237 "BM_%d : Double Free in eh SGL ," 1238 "eh_sgl_free_index=%d\n", 1239 phba->eh_sgl_free_index); 1240 return; 1241 } 1242 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; 1243 phba->eh_sgl_hndl_avbl++; 1244 if (phba->eh_sgl_free_index == 1245 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1)) 1246 phba->eh_sgl_free_index = 0; 1247 else 1248 phba->eh_sgl_free_index++; 1249 } 1250 1251 static void 1252 be_complete_io(struct beiscsi_conn *beiscsi_conn, 1253 struct iscsi_task *task, 1254 struct common_sol_cqe *csol_cqe) 1255 { 1256 struct beiscsi_io_task *io_task = task->dd_data; 1257 struct be_status_bhs *sts_bhs = 1258 (struct be_status_bhs *)io_task->cmd_bhs; 1259 struct iscsi_conn *conn = beiscsi_conn->conn; 1260 unsigned char *sense; 1261 u32 resid = 0, exp_cmdsn, max_cmdsn; 1262 u8 rsp, status, flags; 1263 1264 exp_cmdsn = csol_cqe->exp_cmdsn; 1265 max_cmdsn = (csol_cqe->exp_cmdsn + 1266 csol_cqe->cmd_wnd - 1); 1267 rsp = csol_cqe->i_resp; 1268 status = csol_cqe->i_sts; 1269 flags = csol_cqe->i_flags; 1270 resid = csol_cqe->res_cnt; 1271 1272 if (!task->sc) { 1273 if (io_task->scsi_cmnd) 1274 scsi_dma_unmap(io_task->scsi_cmnd); 1275 1276 return; 1277 } 1278 task->sc->result = (DID_OK << 16) | status; 1279 if (rsp != ISCSI_STATUS_CMD_COMPLETED) { 1280 task->sc->result = DID_ERROR << 16; 1281 goto unmap; 1282 } 1283 1284 /* bidi not initially supported */ 1285 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) { 1286 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW)) 1287 task->sc->result = DID_ERROR << 16; 1288 1289 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) { 1290 scsi_set_resid(task->sc, resid); 1291 if (!status && (scsi_bufflen(task->sc) - resid < 1292 task->sc->underflow)) 1293 task->sc->result = DID_ERROR << 16; 1294 } 1295 } 1296 1297 if (status == SAM_STAT_CHECK_CONDITION) { 1298 u16 sense_len; 1299 unsigned short *slen = (unsigned short *)sts_bhs->sense_info; 1300 1301 sense = sts_bhs->sense_info + sizeof(unsigned short); 1302 sense_len = be16_to_cpu(*slen); 1303 memcpy(task->sc->sense_buffer, sense, 1304 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); 1305 } 1306 1307 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) 1308 conn->rxdata_octets += resid; 1309 unmap: 1310 scsi_dma_unmap(io_task->scsi_cmnd); 1311 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); 1312 } 1313 1314 static void 1315 be_complete_logout(struct beiscsi_conn *beiscsi_conn, 1316 struct iscsi_task *task, 1317 struct common_sol_cqe *csol_cqe) 1318 { 1319 struct iscsi_logout_rsp *hdr; 1320 struct beiscsi_io_task *io_task = task->dd_data; 1321 struct iscsi_conn *conn = beiscsi_conn->conn; 1322 1323 hdr = (struct iscsi_logout_rsp *)task->hdr; 1324 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 1325 hdr->t2wait = 5; 1326 hdr->t2retain = 0; 1327 hdr->flags = csol_cqe->i_flags; 1328 hdr->response = csol_cqe->i_resp; 1329 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1330 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1331 csol_cqe->cmd_wnd - 1); 1332 1333 hdr->dlength[0] = 0; 1334 hdr->dlength[1] = 0; 1335 hdr->dlength[2] = 0; 1336 hdr->hlength = 0; 1337 hdr->itt = io_task->libiscsi_itt; 1338 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1339 } 1340 1341 static void 1342 be_complete_tmf(struct beiscsi_conn *beiscsi_conn, 1343 struct iscsi_task *task, 1344 struct common_sol_cqe *csol_cqe) 1345 { 1346 struct iscsi_tm_rsp *hdr; 1347 struct iscsi_conn *conn = beiscsi_conn->conn; 1348 struct beiscsi_io_task *io_task = task->dd_data; 1349 1350 hdr = (struct iscsi_tm_rsp *)task->hdr; 1351 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 1352 hdr->flags = csol_cqe->i_flags; 1353 hdr->response = csol_cqe->i_resp; 1354 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1355 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1356 csol_cqe->cmd_wnd - 1); 1357 1358 hdr->itt = io_task->libiscsi_itt; 1359 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1360 } 1361 1362 static void 1363 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, 1364 struct beiscsi_hba *phba, struct sol_cqe *psol) 1365 { 1366 struct hwi_wrb_context *pwrb_context; 1367 struct wrb_handle *pwrb_handle = NULL; 1368 struct hwi_controller *phwi_ctrlr; 1369 struct iscsi_task *task; 1370 struct beiscsi_io_task *io_task; 1371 uint16_t wrb_index, cid, cri_index; 1372 1373 phwi_ctrlr = phba->phwi_ctrlr; 1374 if (is_chip_be2_be3r(phba)) { 1375 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1376 wrb_idx, psol); 1377 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1378 cid, psol); 1379 } else { 1380 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1381 wrb_idx, psol); 1382 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1383 cid, psol); 1384 } 1385 1386 cri_index = BE_GET_CRI_FROM_CID(cid); 1387 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1388 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1389 task = pwrb_handle->pio_handle; 1390 1391 io_task = task->dd_data; 1392 memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb)); 1393 iscsi_put_task(task); 1394 } 1395 1396 static void 1397 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, 1398 struct iscsi_task *task, 1399 struct common_sol_cqe *csol_cqe) 1400 { 1401 struct iscsi_nopin *hdr; 1402 struct iscsi_conn *conn = beiscsi_conn->conn; 1403 struct beiscsi_io_task *io_task = task->dd_data; 1404 1405 hdr = (struct iscsi_nopin *)task->hdr; 1406 hdr->flags = csol_cqe->i_flags; 1407 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1408 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1409 csol_cqe->cmd_wnd - 1); 1410 1411 hdr->opcode = ISCSI_OP_NOOP_IN; 1412 hdr->itt = io_task->libiscsi_itt; 1413 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1414 } 1415 1416 static void adapter_get_sol_cqe(struct beiscsi_hba *phba, 1417 struct sol_cqe *psol, 1418 struct common_sol_cqe *csol_cqe) 1419 { 1420 if (is_chip_be2_be3r(phba)) { 1421 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, 1422 i_exp_cmd_sn, psol); 1423 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, 1424 i_res_cnt, psol); 1425 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, 1426 i_cmd_wnd, psol); 1427 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, 1428 wrb_index, psol); 1429 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, 1430 cid, psol); 1431 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1432 hw_sts, psol); 1433 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, 1434 i_resp, psol); 1435 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1436 i_sts, psol); 1437 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, 1438 i_flags, psol); 1439 } else { 1440 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1441 i_exp_cmd_sn, psol); 1442 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1443 i_res_cnt, psol); 1444 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1445 wrb_index, psol); 1446 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1447 cid, psol); 1448 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1449 hw_sts, psol); 1450 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1451 i_cmd_wnd, psol); 1452 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1453 cmd_cmpl, psol)) 1454 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1455 i_sts, psol); 1456 else 1457 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1458 i_sts, psol); 1459 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1460 u, psol)) 1461 csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW; 1462 1463 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1464 o, psol)) 1465 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; 1466 } 1467 } 1468 1469 1470 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, 1471 struct beiscsi_hba *phba, struct sol_cqe *psol) 1472 { 1473 struct hwi_wrb_context *pwrb_context; 1474 struct wrb_handle *pwrb_handle; 1475 struct iscsi_wrb *pwrb = NULL; 1476 struct hwi_controller *phwi_ctrlr; 1477 struct iscsi_task *task; 1478 unsigned int type; 1479 struct iscsi_conn *conn = beiscsi_conn->conn; 1480 struct iscsi_session *session = conn->session; 1481 struct common_sol_cqe csol_cqe = {0}; 1482 uint16_t cri_index = 0; 1483 1484 phwi_ctrlr = phba->phwi_ctrlr; 1485 1486 /* Copy the elements to a common structure */ 1487 adapter_get_sol_cqe(phba, psol, &csol_cqe); 1488 1489 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); 1490 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1491 1492 pwrb_handle = pwrb_context->pwrb_handle_basestd[ 1493 csol_cqe.wrb_index]; 1494 1495 task = pwrb_handle->pio_handle; 1496 pwrb = pwrb_handle->pwrb; 1497 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; 1498 1499 spin_lock_bh(&session->lock); 1500 switch (type) { 1501 case HWH_TYPE_IO: 1502 case HWH_TYPE_IO_RD: 1503 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 1504 ISCSI_OP_NOOP_OUT) 1505 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1506 else 1507 be_complete_io(beiscsi_conn, task, &csol_cqe); 1508 break; 1509 1510 case HWH_TYPE_LOGOUT: 1511 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 1512 be_complete_logout(beiscsi_conn, task, &csol_cqe); 1513 else 1514 be_complete_tmf(beiscsi_conn, task, &csol_cqe); 1515 break; 1516 1517 case HWH_TYPE_LOGIN: 1518 beiscsi_log(phba, KERN_ERR, 1519 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1520 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in" 1521 " hwi_complete_cmd- Solicited path\n"); 1522 break; 1523 1524 case HWH_TYPE_NOP: 1525 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1526 break; 1527 1528 default: 1529 beiscsi_log(phba, KERN_WARNING, 1530 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1531 "BM_%d : In hwi_complete_cmd, unknown type = %d" 1532 "wrb_index 0x%x CID 0x%x\n", type, 1533 csol_cqe.wrb_index, 1534 csol_cqe.cid); 1535 break; 1536 } 1537 1538 spin_unlock_bh(&session->lock); 1539 } 1540 1541 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context 1542 *pasync_ctx, unsigned int is_header, 1543 unsigned int host_write_ptr) 1544 { 1545 if (is_header) 1546 return &pasync_ctx->async_entry[host_write_ptr]. 1547 header_busy_list; 1548 else 1549 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list; 1550 } 1551 1552 static struct async_pdu_handle * 1553 hwi_get_async_handle(struct beiscsi_hba *phba, 1554 struct beiscsi_conn *beiscsi_conn, 1555 struct hwi_async_pdu_context *pasync_ctx, 1556 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index) 1557 { 1558 struct be_bus_address phys_addr; 1559 struct list_head *pbusy_list; 1560 struct async_pdu_handle *pasync_handle = NULL; 1561 unsigned char is_header = 0; 1562 unsigned int index, dpl; 1563 1564 if (is_chip_be2_be3r(phba)) { 1565 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1566 dpl, pdpdu_cqe); 1567 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1568 index, pdpdu_cqe); 1569 } else { 1570 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1571 dpl, pdpdu_cqe); 1572 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1573 index, pdpdu_cqe); 1574 } 1575 1576 phys_addr.u.a32.address_lo = 1577 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1578 db_addr_lo) / 32] - dpl); 1579 phys_addr.u.a32.address_hi = 1580 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1581 db_addr_hi) / 32]; 1582 1583 phys_addr.u.a64.address = 1584 *((unsigned long long *)(&phys_addr.u.a64.address)); 1585 1586 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32] 1587 & PDUCQE_CODE_MASK) { 1588 case UNSOL_HDR_NOTIFY: 1589 is_header = 1; 1590 1591 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1592 is_header, index); 1593 break; 1594 case UNSOL_DATA_NOTIFY: 1595 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1596 is_header, index); 1597 break; 1598 default: 1599 pbusy_list = NULL; 1600 beiscsi_log(phba, KERN_WARNING, 1601 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1602 "BM_%d : Unexpected code=%d\n", 1603 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1604 code) / 32] & PDUCQE_CODE_MASK); 1605 return NULL; 1606 } 1607 1608 WARN_ON(list_empty(pbusy_list)); 1609 list_for_each_entry(pasync_handle, pbusy_list, link) { 1610 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address) 1611 break; 1612 } 1613 1614 WARN_ON(!pasync_handle); 1615 1616 pasync_handle->cri = 1617 BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); 1618 pasync_handle->is_header = is_header; 1619 pasync_handle->buffer_len = dpl; 1620 *pcq_index = index; 1621 1622 return pasync_handle; 1623 } 1624 1625 static unsigned int 1626 hwi_update_async_writables(struct beiscsi_hba *phba, 1627 struct hwi_async_pdu_context *pasync_ctx, 1628 unsigned int is_header, unsigned int cq_index) 1629 { 1630 struct list_head *pbusy_list; 1631 struct async_pdu_handle *pasync_handle; 1632 unsigned int num_entries, writables = 0; 1633 unsigned int *pep_read_ptr, *pwritables; 1634 1635 num_entries = pasync_ctx->num_entries; 1636 if (is_header) { 1637 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr; 1638 pwritables = &pasync_ctx->async_header.writables; 1639 } else { 1640 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr; 1641 pwritables = &pasync_ctx->async_data.writables; 1642 } 1643 1644 while ((*pep_read_ptr) != cq_index) { 1645 (*pep_read_ptr)++; 1646 *pep_read_ptr = (*pep_read_ptr) % num_entries; 1647 1648 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header, 1649 *pep_read_ptr); 1650 if (writables == 0) 1651 WARN_ON(list_empty(pbusy_list)); 1652 1653 if (!list_empty(pbusy_list)) { 1654 pasync_handle = list_entry(pbusy_list->next, 1655 struct async_pdu_handle, 1656 link); 1657 WARN_ON(!pasync_handle); 1658 pasync_handle->consumed = 1; 1659 } 1660 1661 writables++; 1662 } 1663 1664 if (!writables) { 1665 beiscsi_log(phba, KERN_ERR, 1666 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1667 "BM_%d : Duplicate notification received - index 0x%x!!\n", 1668 cq_index); 1669 WARN_ON(1); 1670 } 1671 1672 *pwritables = *pwritables + writables; 1673 return 0; 1674 } 1675 1676 static void hwi_free_async_msg(struct beiscsi_hba *phba, 1677 unsigned int cri) 1678 { 1679 struct hwi_controller *phwi_ctrlr; 1680 struct hwi_async_pdu_context *pasync_ctx; 1681 struct async_pdu_handle *pasync_handle, *tmp_handle; 1682 struct list_head *plist; 1683 1684 phwi_ctrlr = phba->phwi_ctrlr; 1685 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1686 1687 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1688 1689 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { 1690 list_del(&pasync_handle->link); 1691 1692 if (pasync_handle->is_header) { 1693 list_add_tail(&pasync_handle->link, 1694 &pasync_ctx->async_header.free_list); 1695 pasync_ctx->async_header.free_entries++; 1696 } else { 1697 list_add_tail(&pasync_handle->link, 1698 &pasync_ctx->async_data.free_list); 1699 pasync_ctx->async_data.free_entries++; 1700 } 1701 } 1702 1703 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list); 1704 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0; 1705 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0; 1706 } 1707 1708 static struct phys_addr * 1709 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx, 1710 unsigned int is_header, unsigned int host_write_ptr) 1711 { 1712 struct phys_addr *pasync_sge = NULL; 1713 1714 if (is_header) 1715 pasync_sge = pasync_ctx->async_header.ring_base; 1716 else 1717 pasync_sge = pasync_ctx->async_data.ring_base; 1718 1719 return pasync_sge + host_write_ptr; 1720 } 1721 1722 static void hwi_post_async_buffers(struct beiscsi_hba *phba, 1723 unsigned int is_header) 1724 { 1725 struct hwi_controller *phwi_ctrlr; 1726 struct hwi_async_pdu_context *pasync_ctx; 1727 struct async_pdu_handle *pasync_handle; 1728 struct list_head *pfree_link, *pbusy_list; 1729 struct phys_addr *pasync_sge; 1730 unsigned int ring_id, num_entries; 1731 unsigned int host_write_num; 1732 unsigned int writables; 1733 unsigned int i = 0; 1734 u32 doorbell = 0; 1735 1736 phwi_ctrlr = phba->phwi_ctrlr; 1737 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1738 num_entries = pasync_ctx->num_entries; 1739 1740 if (is_header) { 1741 writables = min(pasync_ctx->async_header.writables, 1742 pasync_ctx->async_header.free_entries); 1743 pfree_link = pasync_ctx->async_header.free_list.next; 1744 host_write_num = pasync_ctx->async_header.host_write_ptr; 1745 ring_id = phwi_ctrlr->default_pdu_hdr.id; 1746 } else { 1747 writables = min(pasync_ctx->async_data.writables, 1748 pasync_ctx->async_data.free_entries); 1749 pfree_link = pasync_ctx->async_data.free_list.next; 1750 host_write_num = pasync_ctx->async_data.host_write_ptr; 1751 ring_id = phwi_ctrlr->default_pdu_data.id; 1752 } 1753 1754 writables = (writables / 8) * 8; 1755 if (writables) { 1756 for (i = 0; i < writables; i++) { 1757 pbusy_list = 1758 hwi_get_async_busy_list(pasync_ctx, is_header, 1759 host_write_num); 1760 pasync_handle = 1761 list_entry(pfree_link, struct async_pdu_handle, 1762 link); 1763 WARN_ON(!pasync_handle); 1764 pasync_handle->consumed = 0; 1765 1766 pfree_link = pfree_link->next; 1767 1768 pasync_sge = hwi_get_ring_address(pasync_ctx, 1769 is_header, host_write_num); 1770 1771 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo; 1772 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi; 1773 1774 list_move(&pasync_handle->link, pbusy_list); 1775 1776 host_write_num++; 1777 host_write_num = host_write_num % num_entries; 1778 } 1779 1780 if (is_header) { 1781 pasync_ctx->async_header.host_write_ptr = 1782 host_write_num; 1783 pasync_ctx->async_header.free_entries -= writables; 1784 pasync_ctx->async_header.writables -= writables; 1785 pasync_ctx->async_header.busy_entries += writables; 1786 } else { 1787 pasync_ctx->async_data.host_write_ptr = host_write_num; 1788 pasync_ctx->async_data.free_entries -= writables; 1789 pasync_ctx->async_data.writables -= writables; 1790 pasync_ctx->async_data.busy_entries += writables; 1791 } 1792 1793 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK; 1794 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT; 1795 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT; 1796 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK) 1797 << DB_DEF_PDU_CQPROC_SHIFT; 1798 1799 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET); 1800 } 1801 } 1802 1803 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba, 1804 struct beiscsi_conn *beiscsi_conn, 1805 struct i_t_dpdu_cqe *pdpdu_cqe) 1806 { 1807 struct hwi_controller *phwi_ctrlr; 1808 struct hwi_async_pdu_context *pasync_ctx; 1809 struct async_pdu_handle *pasync_handle = NULL; 1810 unsigned int cq_index = -1; 1811 1812 phwi_ctrlr = phba->phwi_ctrlr; 1813 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1814 1815 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 1816 pdpdu_cqe, &cq_index); 1817 BUG_ON(pasync_handle->is_header != 0); 1818 if (pasync_handle->consumed == 0) 1819 hwi_update_async_writables(phba, pasync_ctx, 1820 pasync_handle->is_header, cq_index); 1821 1822 hwi_free_async_msg(phba, pasync_handle->cri); 1823 hwi_post_async_buffers(phba, pasync_handle->is_header); 1824 } 1825 1826 static unsigned int 1827 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, 1828 struct beiscsi_hba *phba, 1829 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri) 1830 { 1831 struct list_head *plist; 1832 struct async_pdu_handle *pasync_handle; 1833 void *phdr = NULL; 1834 unsigned int hdr_len = 0, buf_len = 0; 1835 unsigned int status, index = 0, offset = 0; 1836 void *pfirst_buffer = NULL; 1837 unsigned int num_buf = 0; 1838 1839 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1840 1841 list_for_each_entry(pasync_handle, plist, link) { 1842 if (index == 0) { 1843 phdr = pasync_handle->pbuffer; 1844 hdr_len = pasync_handle->buffer_len; 1845 } else { 1846 buf_len = pasync_handle->buffer_len; 1847 if (!num_buf) { 1848 pfirst_buffer = pasync_handle->pbuffer; 1849 num_buf++; 1850 } 1851 memcpy(pfirst_buffer + offset, 1852 pasync_handle->pbuffer, buf_len); 1853 offset += buf_len; 1854 } 1855 index++; 1856 } 1857 1858 status = beiscsi_process_async_pdu(beiscsi_conn, phba, 1859 phdr, hdr_len, pfirst_buffer, 1860 offset); 1861 1862 hwi_free_async_msg(phba, cri); 1863 return 0; 1864 } 1865 1866 static unsigned int 1867 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn, 1868 struct beiscsi_hba *phba, 1869 struct async_pdu_handle *pasync_handle) 1870 { 1871 struct hwi_async_pdu_context *pasync_ctx; 1872 struct hwi_controller *phwi_ctrlr; 1873 unsigned int bytes_needed = 0, status = 0; 1874 unsigned short cri = pasync_handle->cri; 1875 struct pdu_base *ppdu; 1876 1877 phwi_ctrlr = phba->phwi_ctrlr; 1878 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1879 1880 list_del(&pasync_handle->link); 1881 if (pasync_handle->is_header) { 1882 pasync_ctx->async_header.busy_entries--; 1883 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1884 hwi_free_async_msg(phba, cri); 1885 BUG(); 1886 } 1887 1888 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0; 1889 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1; 1890 pasync_ctx->async_entry[cri].wait_queue.hdr_len = 1891 (unsigned short)pasync_handle->buffer_len; 1892 list_add_tail(&pasync_handle->link, 1893 &pasync_ctx->async_entry[cri].wait_queue.list); 1894 1895 ppdu = pasync_handle->pbuffer; 1896 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base, 1897 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) & 1898 0xFFFF0000) | ((be16_to_cpu((ppdu-> 1899 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32] 1900 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF)); 1901 1902 if (status == 0) { 1903 pasync_ctx->async_entry[cri].wait_queue.bytes_needed = 1904 bytes_needed; 1905 1906 if (bytes_needed == 0) 1907 status = hwi_fwd_async_msg(beiscsi_conn, phba, 1908 pasync_ctx, cri); 1909 } 1910 } else { 1911 pasync_ctx->async_data.busy_entries--; 1912 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1913 list_add_tail(&pasync_handle->link, 1914 &pasync_ctx->async_entry[cri].wait_queue. 1915 list); 1916 pasync_ctx->async_entry[cri].wait_queue. 1917 bytes_received += 1918 (unsigned short)pasync_handle->buffer_len; 1919 1920 if (pasync_ctx->async_entry[cri].wait_queue. 1921 bytes_received >= 1922 pasync_ctx->async_entry[cri].wait_queue. 1923 bytes_needed) 1924 status = hwi_fwd_async_msg(beiscsi_conn, phba, 1925 pasync_ctx, cri); 1926 } 1927 } 1928 return status; 1929 } 1930 1931 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, 1932 struct beiscsi_hba *phba, 1933 struct i_t_dpdu_cqe *pdpdu_cqe) 1934 { 1935 struct hwi_controller *phwi_ctrlr; 1936 struct hwi_async_pdu_context *pasync_ctx; 1937 struct async_pdu_handle *pasync_handle = NULL; 1938 unsigned int cq_index = -1; 1939 1940 phwi_ctrlr = phba->phwi_ctrlr; 1941 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr); 1942 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 1943 pdpdu_cqe, &cq_index); 1944 1945 if (pasync_handle->consumed == 0) 1946 hwi_update_async_writables(phba, pasync_ctx, 1947 pasync_handle->is_header, cq_index); 1948 1949 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); 1950 hwi_post_async_buffers(phba, pasync_handle->is_header); 1951 } 1952 1953 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) 1954 { 1955 struct be_queue_info *mcc_cq; 1956 struct be_mcc_compl *mcc_compl; 1957 unsigned int num_processed = 0; 1958 1959 mcc_cq = &phba->ctrl.mcc_obj.cq; 1960 mcc_compl = queue_tail_node(mcc_cq); 1961 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1962 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { 1963 1964 if (num_processed >= 32) { 1965 hwi_ring_cq_db(phba, mcc_cq->id, 1966 num_processed, 0, 0); 1967 num_processed = 0; 1968 } 1969 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { 1970 /* Interpret flags as an async trailer */ 1971 if (is_link_state_evt(mcc_compl->flags)) 1972 /* Interpret compl as a async link evt */ 1973 beiscsi_async_link_state_process(phba, 1974 (struct be_async_event_link_state *) mcc_compl); 1975 else 1976 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX, 1977 "BM_%d : Unsupported Async Event, flags" 1978 " = 0x%08x\n", 1979 mcc_compl->flags); 1980 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 1981 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); 1982 atomic_dec(&phba->ctrl.mcc_obj.q.used); 1983 } 1984 1985 mcc_compl->flags = 0; 1986 queue_tail_inc(mcc_cq); 1987 mcc_compl = queue_tail_node(mcc_cq); 1988 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 1989 num_processed++; 1990 } 1991 1992 if (num_processed > 0) 1993 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0); 1994 1995 } 1996 1997 /** 1998 * beiscsi_process_cq()- Process the Completion Queue 1999 * @pbe_eq: Event Q on which the Completion has come 2000 * 2001 * return 2002 * Number of Completion Entries processed. 2003 **/ 2004 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 2005 { 2006 struct be_queue_info *cq; 2007 struct sol_cqe *sol; 2008 struct dmsg_cqe *dmsg; 2009 unsigned int num_processed = 0; 2010 unsigned int tot_nump = 0; 2011 unsigned short code = 0, cid = 0; 2012 uint16_t cri_index = 0; 2013 struct beiscsi_conn *beiscsi_conn; 2014 struct beiscsi_endpoint *beiscsi_ep; 2015 struct iscsi_endpoint *ep; 2016 struct beiscsi_hba *phba; 2017 2018 cq = pbe_eq->cq; 2019 sol = queue_tail_node(cq); 2020 phba = pbe_eq->phba; 2021 2022 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 2023 CQE_VALID_MASK) { 2024 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 2025 2026 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 2027 32] & CQE_CODE_MASK); 2028 2029 /* Get the CID */ 2030 if (is_chip_be2_be3r(phba)) { 2031 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); 2032 } else { 2033 if ((code == DRIVERMSG_NOTIFY) || 2034 (code == UNSOL_HDR_NOTIFY) || 2035 (code == UNSOL_DATA_NOTIFY)) 2036 cid = AMAP_GET_BITS( 2037 struct amap_i_t_dpdu_cqe_v2, 2038 cid, sol); 2039 else 2040 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 2041 cid, sol); 2042 } 2043 2044 cri_index = BE_GET_CRI_FROM_CID(cid); 2045 ep = phba->ep_array[cri_index]; 2046 beiscsi_ep = ep->dd_data; 2047 beiscsi_conn = beiscsi_ep->conn; 2048 2049 if (num_processed >= 32) { 2050 hwi_ring_cq_db(phba, cq->id, 2051 num_processed, 0, 0); 2052 tot_nump += num_processed; 2053 num_processed = 0; 2054 } 2055 2056 switch (code) { 2057 case SOL_CMD_COMPLETE: 2058 hwi_complete_cmd(beiscsi_conn, phba, sol); 2059 break; 2060 case DRIVERMSG_NOTIFY: 2061 beiscsi_log(phba, KERN_INFO, 2062 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2063 "BM_%d : Received %s[%d] on CID : %d\n", 2064 cqe_desc[code], code, cid); 2065 2066 dmsg = (struct dmsg_cqe *)sol; 2067 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 2068 break; 2069 case UNSOL_HDR_NOTIFY: 2070 beiscsi_log(phba, KERN_INFO, 2071 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2072 "BM_%d : Received %s[%d] on CID : %d\n", 2073 cqe_desc[code], code, cid); 2074 2075 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2076 (struct i_t_dpdu_cqe *)sol); 2077 break; 2078 case UNSOL_DATA_NOTIFY: 2079 beiscsi_log(phba, KERN_INFO, 2080 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2081 "BM_%d : Received %s[%d] on CID : %d\n", 2082 cqe_desc[code], code, cid); 2083 2084 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2085 (struct i_t_dpdu_cqe *)sol); 2086 break; 2087 case CXN_INVALIDATE_INDEX_NOTIFY: 2088 case CMD_INVALIDATED_NOTIFY: 2089 case CXN_INVALIDATE_NOTIFY: 2090 beiscsi_log(phba, KERN_ERR, 2091 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2092 "BM_%d : Ignoring %s[%d] on CID : %d\n", 2093 cqe_desc[code], code, cid); 2094 break; 2095 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 2096 case CMD_KILLED_INVALID_STATSN_RCVD: 2097 case CMD_KILLED_INVALID_R2T_RCVD: 2098 case CMD_CXN_KILLED_LUN_INVALID: 2099 case CMD_CXN_KILLED_ICD_INVALID: 2100 case CMD_CXN_KILLED_ITT_INVALID: 2101 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 2102 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 2103 beiscsi_log(phba, KERN_ERR, 2104 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2105 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 2106 cqe_desc[code], code, cid); 2107 break; 2108 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 2109 beiscsi_log(phba, KERN_ERR, 2110 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2111 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 2112 cqe_desc[code], code, cid); 2113 hwi_flush_default_pdu_buffer(phba, beiscsi_conn, 2114 (struct i_t_dpdu_cqe *) sol); 2115 break; 2116 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2117 case CXN_KILLED_BURST_LEN_MISMATCH: 2118 case CXN_KILLED_AHS_RCVD: 2119 case CXN_KILLED_HDR_DIGEST_ERR: 2120 case CXN_KILLED_UNKNOWN_HDR: 2121 case CXN_KILLED_STALE_ITT_TTT_RCVD: 2122 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 2123 case CXN_KILLED_TIMED_OUT: 2124 case CXN_KILLED_FIN_RCVD: 2125 case CXN_KILLED_RST_SENT: 2126 case CXN_KILLED_RST_RCVD: 2127 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2128 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2129 case CXN_KILLED_OVER_RUN_RESIDUAL: 2130 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2131 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2132 beiscsi_log(phba, KERN_ERR, 2133 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2134 "BM_%d : Event %s[%d] received on CID : %d\n", 2135 cqe_desc[code], code, cid); 2136 if (beiscsi_conn) 2137 iscsi_conn_failure(beiscsi_conn->conn, 2138 ISCSI_ERR_CONN_FAILED); 2139 break; 2140 default: 2141 beiscsi_log(phba, KERN_ERR, 2142 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2143 "BM_%d : Invalid CQE Event Received Code : %d" 2144 "CID 0x%x...\n", 2145 code, cid); 2146 break; 2147 } 2148 2149 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2150 queue_tail_inc(cq); 2151 sol = queue_tail_node(cq); 2152 num_processed++; 2153 } 2154 2155 if (num_processed > 0) { 2156 tot_nump += num_processed; 2157 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0); 2158 } 2159 return tot_nump; 2160 } 2161 2162 void beiscsi_process_all_cqs(struct work_struct *work) 2163 { 2164 unsigned long flags; 2165 struct hwi_controller *phwi_ctrlr; 2166 struct hwi_context_memory *phwi_context; 2167 struct beiscsi_hba *phba; 2168 struct be_eq_obj *pbe_eq = 2169 container_of(work, struct be_eq_obj, work_cqs); 2170 2171 phba = pbe_eq->phba; 2172 phwi_ctrlr = phba->phwi_ctrlr; 2173 phwi_context = phwi_ctrlr->phwi_ctxt; 2174 2175 if (pbe_eq->todo_mcc_cq) { 2176 spin_lock_irqsave(&phba->isr_lock, flags); 2177 pbe_eq->todo_mcc_cq = false; 2178 spin_unlock_irqrestore(&phba->isr_lock, flags); 2179 beiscsi_process_mcc_isr(phba); 2180 } 2181 2182 if (pbe_eq->todo_cq) { 2183 spin_lock_irqsave(&phba->isr_lock, flags); 2184 pbe_eq->todo_cq = false; 2185 spin_unlock_irqrestore(&phba->isr_lock, flags); 2186 beiscsi_process_cq(pbe_eq); 2187 } 2188 2189 /* rearm EQ for further interrupts */ 2190 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2191 } 2192 2193 static int be_iopoll(struct blk_iopoll *iop, int budget) 2194 { 2195 unsigned int ret; 2196 struct beiscsi_hba *phba; 2197 struct be_eq_obj *pbe_eq; 2198 2199 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2200 ret = beiscsi_process_cq(pbe_eq); 2201 if (ret < budget) { 2202 phba = pbe_eq->phba; 2203 blk_iopoll_complete(iop); 2204 beiscsi_log(phba, KERN_INFO, 2205 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2206 "BM_%d : rearm pbe_eq->q.id =%d\n", 2207 pbe_eq->q.id); 2208 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2209 } 2210 return ret; 2211 } 2212 2213 static void 2214 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2215 unsigned int num_sg, struct beiscsi_io_task *io_task) 2216 { 2217 struct iscsi_sge *psgl; 2218 unsigned int sg_len, index; 2219 unsigned int sge_len = 0; 2220 unsigned long long addr; 2221 struct scatterlist *l_sg; 2222 unsigned int offset; 2223 2224 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2225 io_task->bhs_pa.u.a32.address_lo); 2226 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2227 io_task->bhs_pa.u.a32.address_hi); 2228 2229 l_sg = sg; 2230 for (index = 0; (index < num_sg) && (index < 2); index++, 2231 sg = sg_next(sg)) { 2232 if (index == 0) { 2233 sg_len = sg_dma_len(sg); 2234 addr = (u64) sg_dma_address(sg); 2235 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2236 sge0_addr_lo, pwrb, 2237 lower_32_bits(addr)); 2238 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2239 sge0_addr_hi, pwrb, 2240 upper_32_bits(addr)); 2241 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2242 sge0_len, pwrb, 2243 sg_len); 2244 sge_len = sg_len; 2245 } else { 2246 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2247 pwrb, sge_len); 2248 sg_len = sg_dma_len(sg); 2249 addr = (u64) sg_dma_address(sg); 2250 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2251 sge1_addr_lo, pwrb, 2252 lower_32_bits(addr)); 2253 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2254 sge1_addr_hi, pwrb, 2255 upper_32_bits(addr)); 2256 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2257 sge1_len, pwrb, 2258 sg_len); 2259 } 2260 } 2261 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2262 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2263 2264 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2265 2266 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2267 io_task->bhs_pa.u.a32.address_hi); 2268 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2269 io_task->bhs_pa.u.a32.address_lo); 2270 2271 if (num_sg == 1) { 2272 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2273 1); 2274 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2275 0); 2276 } else if (num_sg == 2) { 2277 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2278 0); 2279 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2280 1); 2281 } else { 2282 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2283 0); 2284 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2285 0); 2286 } 2287 2288 sg = l_sg; 2289 psgl++; 2290 psgl++; 2291 offset = 0; 2292 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2293 sg_len = sg_dma_len(sg); 2294 addr = (u64) sg_dma_address(sg); 2295 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2296 lower_32_bits(addr)); 2297 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2298 upper_32_bits(addr)); 2299 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2300 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2301 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2302 offset += sg_len; 2303 } 2304 psgl--; 2305 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2306 } 2307 2308 static void 2309 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2310 unsigned int num_sg, struct beiscsi_io_task *io_task) 2311 { 2312 struct iscsi_sge *psgl; 2313 unsigned int sg_len, index; 2314 unsigned int sge_len = 0; 2315 unsigned long long addr; 2316 struct scatterlist *l_sg; 2317 unsigned int offset; 2318 2319 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2320 io_task->bhs_pa.u.a32.address_lo); 2321 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2322 io_task->bhs_pa.u.a32.address_hi); 2323 2324 l_sg = sg; 2325 for (index = 0; (index < num_sg) && (index < 2); index++, 2326 sg = sg_next(sg)) { 2327 if (index == 0) { 2328 sg_len = sg_dma_len(sg); 2329 addr = (u64) sg_dma_address(sg); 2330 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2331 ((u32)(addr & 0xFFFFFFFF))); 2332 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2333 ((u32)(addr >> 32))); 2334 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2335 sg_len); 2336 sge_len = sg_len; 2337 } else { 2338 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2339 pwrb, sge_len); 2340 sg_len = sg_dma_len(sg); 2341 addr = (u64) sg_dma_address(sg); 2342 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2343 ((u32)(addr & 0xFFFFFFFF))); 2344 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2345 ((u32)(addr >> 32))); 2346 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2347 sg_len); 2348 } 2349 } 2350 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2351 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2352 2353 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2354 2355 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2356 io_task->bhs_pa.u.a32.address_hi); 2357 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2358 io_task->bhs_pa.u.a32.address_lo); 2359 2360 if (num_sg == 1) { 2361 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2362 1); 2363 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2364 0); 2365 } else if (num_sg == 2) { 2366 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2367 0); 2368 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2369 1); 2370 } else { 2371 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2372 0); 2373 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2374 0); 2375 } 2376 sg = l_sg; 2377 psgl++; 2378 psgl++; 2379 offset = 0; 2380 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2381 sg_len = sg_dma_len(sg); 2382 addr = (u64) sg_dma_address(sg); 2383 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2384 (addr & 0xFFFFFFFF)); 2385 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2386 (addr >> 32)); 2387 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2388 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2389 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2390 offset += sg_len; 2391 } 2392 psgl--; 2393 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2394 } 2395 2396 /** 2397 * hwi_write_buffer()- Populate the WRB with task info 2398 * @pwrb: ptr to the WRB entry 2399 * @task: iscsi task which is to be executed 2400 **/ 2401 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2402 { 2403 struct iscsi_sge *psgl; 2404 struct beiscsi_io_task *io_task = task->dd_data; 2405 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2406 struct beiscsi_hba *phba = beiscsi_conn->phba; 2407 uint8_t dsp_value = 0; 2408 2409 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2410 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2411 io_task->bhs_pa.u.a32.address_lo); 2412 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2413 io_task->bhs_pa.u.a32.address_hi); 2414 2415 if (task->data) { 2416 2417 /* Check for the data_count */ 2418 dsp_value = (task->data_count) ? 1 : 0; 2419 2420 if (is_chip_be2_be3r(phba)) 2421 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2422 pwrb, dsp_value); 2423 else 2424 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2425 pwrb, dsp_value); 2426 2427 /* Map addr only if there is data_count */ 2428 if (dsp_value) { 2429 io_task->mtask_addr = pci_map_single(phba->pcidev, 2430 task->data, 2431 task->data_count, 2432 PCI_DMA_TODEVICE); 2433 io_task->mtask_data_count = task->data_count; 2434 } else 2435 io_task->mtask_addr = 0; 2436 2437 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2438 lower_32_bits(io_task->mtask_addr)); 2439 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2440 upper_32_bits(io_task->mtask_addr)); 2441 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2442 task->data_count); 2443 2444 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2445 } else { 2446 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2447 io_task->mtask_addr = 0; 2448 } 2449 2450 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2451 2452 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2453 2454 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2455 io_task->bhs_pa.u.a32.address_hi); 2456 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2457 io_task->bhs_pa.u.a32.address_lo); 2458 if (task->data) { 2459 psgl++; 2460 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2461 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2462 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2463 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2464 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2465 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2466 2467 psgl++; 2468 if (task->data) { 2469 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2470 lower_32_bits(io_task->mtask_addr)); 2471 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2472 upper_32_bits(io_task->mtask_addr)); 2473 } 2474 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2475 } 2476 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2477 } 2478 2479 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2480 { 2481 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2482 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2483 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2484 2485 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2486 sizeof(struct sol_cqe)); 2487 num_async_pdu_buf_pages = 2488 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2489 phba->params.defpdu_hdr_sz); 2490 num_async_pdu_buf_sgl_pages = 2491 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2492 sizeof(struct phys_addr)); 2493 num_async_pdu_data_pages = 2494 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2495 phba->params.defpdu_data_sz); 2496 num_async_pdu_data_sgl_pages = 2497 PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \ 2498 sizeof(struct phys_addr)); 2499 2500 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2501 2502 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2503 BE_ISCSI_PDU_HEADER_SIZE; 2504 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2505 sizeof(struct hwi_context_memory); 2506 2507 2508 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2509 * (phba->params.wrbs_per_cxn) 2510 * phba->params.cxns_per_ctrl; 2511 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2512 (phba->params.wrbs_per_cxn); 2513 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2514 phba->params.cxns_per_ctrl); 2515 2516 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2517 phba->params.icds_per_ctrl; 2518 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2519 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2520 2521 phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] = 2522 num_async_pdu_buf_pages * PAGE_SIZE; 2523 phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] = 2524 num_async_pdu_data_pages * PAGE_SIZE; 2525 phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] = 2526 num_async_pdu_buf_sgl_pages * PAGE_SIZE; 2527 phba->mem_req[HWI_MEM_ASYNC_DATA_RING] = 2528 num_async_pdu_data_sgl_pages * PAGE_SIZE; 2529 phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] = 2530 phba->params.asyncpdus_per_ctrl * 2531 sizeof(struct async_pdu_handle); 2532 phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] = 2533 phba->params.asyncpdus_per_ctrl * 2534 sizeof(struct async_pdu_handle); 2535 phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] = 2536 sizeof(struct hwi_async_pdu_context) + 2537 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry)); 2538 } 2539 2540 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2541 { 2542 dma_addr_t bus_add; 2543 struct hwi_controller *phwi_ctrlr; 2544 struct be_mem_descriptor *mem_descr; 2545 struct mem_array *mem_arr, *mem_arr_orig; 2546 unsigned int i, j, alloc_size, curr_alloc_size; 2547 2548 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2549 if (!phba->phwi_ctrlr) 2550 return -ENOMEM; 2551 2552 /* Allocate memory for wrb_context */ 2553 phwi_ctrlr = phba->phwi_ctrlr; 2554 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * 2555 phba->params.cxns_per_ctrl, 2556 GFP_KERNEL); 2557 if (!phwi_ctrlr->wrb_context) 2558 return -ENOMEM; 2559 2560 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2561 GFP_KERNEL); 2562 if (!phba->init_mem) { 2563 kfree(phwi_ctrlr->wrb_context); 2564 kfree(phba->phwi_ctrlr); 2565 return -ENOMEM; 2566 } 2567 2568 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT, 2569 GFP_KERNEL); 2570 if (!mem_arr_orig) { 2571 kfree(phba->init_mem); 2572 kfree(phwi_ctrlr->wrb_context); 2573 kfree(phba->phwi_ctrlr); 2574 return -ENOMEM; 2575 } 2576 2577 mem_descr = phba->init_mem; 2578 for (i = 0; i < SE_MEM_MAX; i++) { 2579 j = 0; 2580 mem_arr = mem_arr_orig; 2581 alloc_size = phba->mem_req[i]; 2582 memset(mem_arr, 0, sizeof(struct mem_array) * 2583 BEISCSI_MAX_FRAGS_INIT); 2584 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2585 do { 2586 mem_arr->virtual_address = pci_alloc_consistent( 2587 phba->pcidev, 2588 curr_alloc_size, 2589 &bus_add); 2590 if (!mem_arr->virtual_address) { 2591 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2592 goto free_mem; 2593 if (curr_alloc_size - 2594 rounddown_pow_of_two(curr_alloc_size)) 2595 curr_alloc_size = rounddown_pow_of_two 2596 (curr_alloc_size); 2597 else 2598 curr_alloc_size = curr_alloc_size / 2; 2599 } else { 2600 mem_arr->bus_address.u. 2601 a64.address = (__u64) bus_add; 2602 mem_arr->size = curr_alloc_size; 2603 alloc_size -= curr_alloc_size; 2604 curr_alloc_size = min(be_max_phys_size * 2605 1024, alloc_size); 2606 j++; 2607 mem_arr++; 2608 } 2609 } while (alloc_size); 2610 mem_descr->num_elements = j; 2611 mem_descr->size_in_bytes = phba->mem_req[i]; 2612 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j, 2613 GFP_KERNEL); 2614 if (!mem_descr->mem_array) 2615 goto free_mem; 2616 2617 memcpy(mem_descr->mem_array, mem_arr_orig, 2618 sizeof(struct mem_array) * j); 2619 mem_descr++; 2620 } 2621 kfree(mem_arr_orig); 2622 return 0; 2623 free_mem: 2624 mem_descr->num_elements = j; 2625 while ((i) || (j)) { 2626 for (j = mem_descr->num_elements; j > 0; j--) { 2627 pci_free_consistent(phba->pcidev, 2628 mem_descr->mem_array[j - 1].size, 2629 mem_descr->mem_array[j - 1]. 2630 virtual_address, 2631 (unsigned long)mem_descr-> 2632 mem_array[j - 1]. 2633 bus_address.u.a64.address); 2634 } 2635 if (i) { 2636 i--; 2637 kfree(mem_descr->mem_array); 2638 mem_descr--; 2639 } 2640 } 2641 kfree(mem_arr_orig); 2642 kfree(phba->init_mem); 2643 kfree(phba->phwi_ctrlr->wrb_context); 2644 kfree(phba->phwi_ctrlr); 2645 return -ENOMEM; 2646 } 2647 2648 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2649 { 2650 beiscsi_find_mem_req(phba); 2651 return beiscsi_alloc_mem(phba); 2652 } 2653 2654 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2655 { 2656 struct pdu_data_out *pdata_out; 2657 struct pdu_nop_out *pnop_out; 2658 struct be_mem_descriptor *mem_descr; 2659 2660 mem_descr = phba->init_mem; 2661 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2662 pdata_out = 2663 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2664 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2665 2666 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2667 IIOC_SCSI_DATA); 2668 2669 pnop_out = 2670 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 2671 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2672 2673 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2674 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2675 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2676 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2677 } 2678 2679 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2680 { 2681 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2682 struct hwi_context_memory *phwi_ctxt; 2683 struct wrb_handle *pwrb_handle = NULL; 2684 struct hwi_controller *phwi_ctrlr; 2685 struct hwi_wrb_context *pwrb_context; 2686 struct iscsi_wrb *pwrb = NULL; 2687 unsigned int num_cxn_wrbh = 0; 2688 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2689 2690 mem_descr_wrbh = phba->init_mem; 2691 mem_descr_wrbh += HWI_MEM_WRBH; 2692 2693 mem_descr_wrb = phba->init_mem; 2694 mem_descr_wrb += HWI_MEM_WRB; 2695 phwi_ctrlr = phba->phwi_ctrlr; 2696 2697 /* Allocate memory for WRBQ */ 2698 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2699 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * 2700 phba->fw_config.iscsi_cid_count, 2701 GFP_KERNEL); 2702 if (!phwi_ctxt->be_wrbq) { 2703 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2704 "BM_%d : WRBQ Mem Alloc Failed\n"); 2705 return -ENOMEM; 2706 } 2707 2708 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2709 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2710 pwrb_context->pwrb_handle_base = 2711 kzalloc(sizeof(struct wrb_handle *) * 2712 phba->params.wrbs_per_cxn, GFP_KERNEL); 2713 if (!pwrb_context->pwrb_handle_base) { 2714 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2715 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2716 goto init_wrb_hndl_failed; 2717 } 2718 pwrb_context->pwrb_handle_basestd = 2719 kzalloc(sizeof(struct wrb_handle *) * 2720 phba->params.wrbs_per_cxn, GFP_KERNEL); 2721 if (!pwrb_context->pwrb_handle_basestd) { 2722 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2723 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2724 goto init_wrb_hndl_failed; 2725 } 2726 if (!num_cxn_wrbh) { 2727 pwrb_handle = 2728 mem_descr_wrbh->mem_array[idx].virtual_address; 2729 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2730 ((sizeof(struct wrb_handle)) * 2731 phba->params.wrbs_per_cxn)); 2732 idx++; 2733 } 2734 pwrb_context->alloc_index = 0; 2735 pwrb_context->wrb_handles_available = 0; 2736 pwrb_context->free_index = 0; 2737 2738 if (num_cxn_wrbh) { 2739 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2740 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2741 pwrb_context->pwrb_handle_basestd[j] = 2742 pwrb_handle; 2743 pwrb_context->wrb_handles_available++; 2744 pwrb_handle->wrb_index = j; 2745 pwrb_handle++; 2746 } 2747 num_cxn_wrbh--; 2748 } 2749 } 2750 idx = 0; 2751 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2752 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2753 if (!num_cxn_wrb) { 2754 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2755 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2756 ((sizeof(struct iscsi_wrb) * 2757 phba->params.wrbs_per_cxn)); 2758 idx++; 2759 } 2760 2761 if (num_cxn_wrb) { 2762 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2763 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2764 pwrb_handle->pwrb = pwrb; 2765 pwrb++; 2766 } 2767 num_cxn_wrb--; 2768 } 2769 } 2770 return 0; 2771 init_wrb_hndl_failed: 2772 for (j = index; j > 0; j--) { 2773 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2774 kfree(pwrb_context->pwrb_handle_base); 2775 kfree(pwrb_context->pwrb_handle_basestd); 2776 } 2777 return -ENOMEM; 2778 } 2779 2780 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2781 { 2782 struct hwi_controller *phwi_ctrlr; 2783 struct hba_parameters *p = &phba->params; 2784 struct hwi_async_pdu_context *pasync_ctx; 2785 struct async_pdu_handle *pasync_header_h, *pasync_data_h; 2786 unsigned int index, idx, num_per_mem, num_async_data; 2787 struct be_mem_descriptor *mem_descr; 2788 2789 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2790 mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT; 2791 2792 phwi_ctrlr = phba->phwi_ctrlr; 2793 phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *) 2794 mem_descr->mem_array[0].virtual_address; 2795 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; 2796 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2797 2798 pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) * 2799 phba->fw_config.iscsi_cid_count, 2800 GFP_KERNEL); 2801 if (!pasync_ctx->async_entry) { 2802 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2803 "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n"); 2804 return -ENOMEM; 2805 } 2806 2807 pasync_ctx->num_entries = p->asyncpdus_per_ctrl; 2808 pasync_ctx->buffer_size = p->defpdu_hdr_sz; 2809 2810 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2811 mem_descr += HWI_MEM_ASYNC_HEADER_BUF; 2812 if (mem_descr->mem_array[0].virtual_address) { 2813 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2814 "BM_%d : hwi_init_async_pdu_ctx" 2815 " HWI_MEM_ASYNC_HEADER_BUF va=%p\n", 2816 mem_descr->mem_array[0].virtual_address); 2817 } else 2818 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2819 "BM_%d : No Virtual address\n"); 2820 2821 pasync_ctx->async_header.va_base = 2822 mem_descr->mem_array[0].virtual_address; 2823 2824 pasync_ctx->async_header.pa_base.u.a64.address = 2825 mem_descr->mem_array[0].bus_address.u.a64.address; 2826 2827 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2828 mem_descr += HWI_MEM_ASYNC_HEADER_RING; 2829 if (mem_descr->mem_array[0].virtual_address) { 2830 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2831 "BM_%d : hwi_init_async_pdu_ctx" 2832 " HWI_MEM_ASYNC_HEADER_RING va=%p\n", 2833 mem_descr->mem_array[0].virtual_address); 2834 } else 2835 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2836 "BM_%d : No Virtual address\n"); 2837 2838 pasync_ctx->async_header.ring_base = 2839 mem_descr->mem_array[0].virtual_address; 2840 2841 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2842 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE; 2843 if (mem_descr->mem_array[0].virtual_address) { 2844 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2845 "BM_%d : hwi_init_async_pdu_ctx" 2846 " HWI_MEM_ASYNC_HEADER_HANDLE va=%p\n", 2847 mem_descr->mem_array[0].virtual_address); 2848 } else 2849 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2850 "BM_%d : No Virtual address\n"); 2851 2852 pasync_ctx->async_header.handle_base = 2853 mem_descr->mem_array[0].virtual_address; 2854 pasync_ctx->async_header.writables = 0; 2855 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); 2856 2857 2858 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2859 mem_descr += HWI_MEM_ASYNC_DATA_RING; 2860 if (mem_descr->mem_array[0].virtual_address) { 2861 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2862 "BM_%d : hwi_init_async_pdu_ctx" 2863 " HWI_MEM_ASYNC_DATA_RING va=%p\n", 2864 mem_descr->mem_array[0].virtual_address); 2865 } else 2866 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2867 "BM_%d : No Virtual address\n"); 2868 2869 pasync_ctx->async_data.ring_base = 2870 mem_descr->mem_array[0].virtual_address; 2871 2872 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2873 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE; 2874 if (!mem_descr->mem_array[0].virtual_address) 2875 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2876 "BM_%d : No Virtual address\n"); 2877 2878 pasync_ctx->async_data.handle_base = 2879 mem_descr->mem_array[0].virtual_address; 2880 pasync_ctx->async_data.writables = 0; 2881 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 2882 2883 pasync_header_h = 2884 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base; 2885 pasync_data_h = 2886 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base; 2887 2888 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2889 mem_descr += HWI_MEM_ASYNC_DATA_BUF; 2890 if (mem_descr->mem_array[0].virtual_address) { 2891 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2892 "BM_%d : hwi_init_async_pdu_ctx" 2893 " HWI_MEM_ASYNC_DATA_BUF va=%p\n", 2894 mem_descr->mem_array[0].virtual_address); 2895 } else 2896 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 2897 "BM_%d : No Virtual address\n"); 2898 2899 idx = 0; 2900 pasync_ctx->async_data.va_base = 2901 mem_descr->mem_array[idx].virtual_address; 2902 pasync_ctx->async_data.pa_base.u.a64.address = 2903 mem_descr->mem_array[idx].bus_address.u.a64.address; 2904 2905 num_async_data = ((mem_descr->mem_array[idx].size) / 2906 phba->params.defpdu_data_sz); 2907 num_per_mem = 0; 2908 2909 for (index = 0; index < p->asyncpdus_per_ctrl; index++) { 2910 pasync_header_h->cri = -1; 2911 pasync_header_h->index = (char)index; 2912 INIT_LIST_HEAD(&pasync_header_h->link); 2913 pasync_header_h->pbuffer = 2914 (void *)((unsigned long) 2915 (pasync_ctx->async_header.va_base) + 2916 (p->defpdu_hdr_sz * index)); 2917 2918 pasync_header_h->pa.u.a64.address = 2919 pasync_ctx->async_header.pa_base.u.a64.address + 2920 (p->defpdu_hdr_sz * index); 2921 2922 list_add_tail(&pasync_header_h->link, 2923 &pasync_ctx->async_header.free_list); 2924 pasync_header_h++; 2925 pasync_ctx->async_header.free_entries++; 2926 pasync_ctx->async_header.writables++; 2927 2928 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list); 2929 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 2930 header_busy_list); 2931 pasync_data_h->cri = -1; 2932 pasync_data_h->index = (char)index; 2933 INIT_LIST_HEAD(&pasync_data_h->link); 2934 2935 if (!num_async_data) { 2936 num_per_mem = 0; 2937 idx++; 2938 pasync_ctx->async_data.va_base = 2939 mem_descr->mem_array[idx].virtual_address; 2940 pasync_ctx->async_data.pa_base.u.a64.address = 2941 mem_descr->mem_array[idx]. 2942 bus_address.u.a64.address; 2943 2944 num_async_data = ((mem_descr->mem_array[idx].size) / 2945 phba->params.defpdu_data_sz); 2946 } 2947 pasync_data_h->pbuffer = 2948 (void *)((unsigned long) 2949 (pasync_ctx->async_data.va_base) + 2950 (p->defpdu_data_sz * num_per_mem)); 2951 2952 pasync_data_h->pa.u.a64.address = 2953 pasync_ctx->async_data.pa_base.u.a64.address + 2954 (p->defpdu_data_sz * num_per_mem); 2955 num_per_mem++; 2956 num_async_data--; 2957 2958 list_add_tail(&pasync_data_h->link, 2959 &pasync_ctx->async_data.free_list); 2960 pasync_data_h++; 2961 pasync_ctx->async_data.free_entries++; 2962 pasync_ctx->async_data.writables++; 2963 2964 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list); 2965 } 2966 2967 pasync_ctx->async_header.host_write_ptr = 0; 2968 pasync_ctx->async_header.ep_read_ptr = -1; 2969 pasync_ctx->async_data.host_write_ptr = 0; 2970 pasync_ctx->async_data.ep_read_ptr = -1; 2971 2972 return 0; 2973 } 2974 2975 static int 2976 be_sgl_create_contiguous(void *virtual_address, 2977 u64 physical_address, u32 length, 2978 struct be_dma_mem *sgl) 2979 { 2980 WARN_ON(!virtual_address); 2981 WARN_ON(!physical_address); 2982 WARN_ON(!length > 0); 2983 WARN_ON(!sgl); 2984 2985 sgl->va = virtual_address; 2986 sgl->dma = (unsigned long)physical_address; 2987 sgl->size = length; 2988 2989 return 0; 2990 } 2991 2992 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 2993 { 2994 memset(sgl, 0, sizeof(*sgl)); 2995 } 2996 2997 static void 2998 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 2999 struct mem_array *pmem, struct be_dma_mem *sgl) 3000 { 3001 if (sgl->va) 3002 be_sgl_destroy_contiguous(sgl); 3003 3004 be_sgl_create_contiguous(pmem->virtual_address, 3005 pmem->bus_address.u.a64.address, 3006 pmem->size, sgl); 3007 } 3008 3009 static void 3010 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 3011 struct mem_array *pmem, struct be_dma_mem *sgl) 3012 { 3013 if (sgl->va) 3014 be_sgl_destroy_contiguous(sgl); 3015 3016 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 3017 pmem->bus_address.u.a64.address, 3018 pmem->size, sgl); 3019 } 3020 3021 static int be_fill_queue(struct be_queue_info *q, 3022 u16 len, u16 entry_size, void *vaddress) 3023 { 3024 struct be_dma_mem *mem = &q->dma_mem; 3025 3026 memset(q, 0, sizeof(*q)); 3027 q->len = len; 3028 q->entry_size = entry_size; 3029 mem->size = len * entry_size; 3030 mem->va = vaddress; 3031 if (!mem->va) 3032 return -ENOMEM; 3033 memset(mem->va, 0, mem->size); 3034 return 0; 3035 } 3036 3037 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3038 struct hwi_context_memory *phwi_context) 3039 { 3040 unsigned int i, num_eq_pages; 3041 int ret = 0, eq_for_mcc; 3042 struct be_queue_info *eq; 3043 struct be_dma_mem *mem; 3044 void *eq_vaddress; 3045 dma_addr_t paddr; 3046 3047 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ 3048 sizeof(struct be_eq_entry)); 3049 3050 if (phba->msix_enabled) 3051 eq_for_mcc = 1; 3052 else 3053 eq_for_mcc = 0; 3054 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3055 eq = &phwi_context->be_eq[i].q; 3056 mem = &eq->dma_mem; 3057 phwi_context->be_eq[i].phba = phba; 3058 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3059 num_eq_pages * PAGE_SIZE, 3060 &paddr); 3061 if (!eq_vaddress) 3062 goto create_eq_error; 3063 3064 mem->va = eq_vaddress; 3065 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3066 sizeof(struct be_eq_entry), eq_vaddress); 3067 if (ret) { 3068 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3069 "BM_%d : be_fill_queue Failed for EQ\n"); 3070 goto create_eq_error; 3071 } 3072 3073 mem->dma = paddr; 3074 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3075 phwi_context->cur_eqd); 3076 if (ret) { 3077 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3078 "BM_%d : beiscsi_cmd_eq_create" 3079 "Failed for EQ\n"); 3080 goto create_eq_error; 3081 } 3082 3083 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3084 "BM_%d : eqid = %d\n", 3085 phwi_context->be_eq[i].q.id); 3086 } 3087 return 0; 3088 create_eq_error: 3089 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3090 eq = &phwi_context->be_eq[i].q; 3091 mem = &eq->dma_mem; 3092 if (mem->va) 3093 pci_free_consistent(phba->pcidev, num_eq_pages 3094 * PAGE_SIZE, 3095 mem->va, mem->dma); 3096 } 3097 return ret; 3098 } 3099 3100 static int beiscsi_create_cqs(struct beiscsi_hba *phba, 3101 struct hwi_context_memory *phwi_context) 3102 { 3103 unsigned int i, num_cq_pages; 3104 int ret = 0; 3105 struct be_queue_info *cq, *eq; 3106 struct be_dma_mem *mem; 3107 struct be_eq_obj *pbe_eq; 3108 void *cq_vaddress; 3109 dma_addr_t paddr; 3110 3111 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 3112 sizeof(struct sol_cqe)); 3113 3114 for (i = 0; i < phba->num_cpus; i++) { 3115 cq = &phwi_context->be_cq[i]; 3116 eq = &phwi_context->be_eq[i].q; 3117 pbe_eq = &phwi_context->be_eq[i]; 3118 pbe_eq->cq = cq; 3119 pbe_eq->phba = phba; 3120 mem = &cq->dma_mem; 3121 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3122 num_cq_pages * PAGE_SIZE, 3123 &paddr); 3124 if (!cq_vaddress) 3125 goto create_cq_error; 3126 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3127 sizeof(struct sol_cqe), cq_vaddress); 3128 if (ret) { 3129 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3130 "BM_%d : be_fill_queue Failed " 3131 "for ISCSI CQ\n"); 3132 goto create_cq_error; 3133 } 3134 3135 mem->dma = paddr; 3136 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 3137 false, 0); 3138 if (ret) { 3139 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3140 "BM_%d : beiscsi_cmd_eq_create" 3141 "Failed for ISCSI CQ\n"); 3142 goto create_cq_error; 3143 } 3144 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3145 "BM_%d : iscsi cq_id is %d for eq_id %d\n" 3146 "iSCSI CQ CREATED\n", cq->id, eq->id); 3147 } 3148 return 0; 3149 3150 create_cq_error: 3151 for (i = 0; i < phba->num_cpus; i++) { 3152 cq = &phwi_context->be_cq[i]; 3153 mem = &cq->dma_mem; 3154 if (mem->va) 3155 pci_free_consistent(phba->pcidev, num_cq_pages 3156 * PAGE_SIZE, 3157 mem->va, mem->dma); 3158 } 3159 return ret; 3160 3161 } 3162 3163 static int 3164 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3165 struct hwi_context_memory *phwi_context, 3166 struct hwi_controller *phwi_ctrlr, 3167 unsigned int def_pdu_ring_sz) 3168 { 3169 unsigned int idx; 3170 int ret; 3171 struct be_queue_info *dq, *cq; 3172 struct be_dma_mem *mem; 3173 struct be_mem_descriptor *mem_descr; 3174 void *dq_vaddress; 3175 3176 idx = 0; 3177 dq = &phwi_context->be_def_hdrq; 3178 cq = &phwi_context->be_cq[0]; 3179 mem = &dq->dma_mem; 3180 mem_descr = phba->init_mem; 3181 mem_descr += HWI_MEM_ASYNC_HEADER_RING; 3182 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3183 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3184 sizeof(struct phys_addr), 3185 sizeof(struct phys_addr), dq_vaddress); 3186 if (ret) { 3187 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3188 "BM_%d : be_fill_queue Failed for DEF PDU HDR\n"); 3189 return ret; 3190 } 3191 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3192 bus_address.u.a64.address; 3193 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3194 def_pdu_ring_sz, 3195 phba->params.defpdu_hdr_sz); 3196 if (ret) { 3197 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3198 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR\n"); 3199 return ret; 3200 } 3201 phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id; 3202 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3203 "BM_%d : iscsi def pdu id is %d\n", 3204 phwi_context->be_def_hdrq.id); 3205 3206 hwi_post_async_buffers(phba, 1); 3207 return 0; 3208 } 3209 3210 static int 3211 beiscsi_create_def_data(struct beiscsi_hba *phba, 3212 struct hwi_context_memory *phwi_context, 3213 struct hwi_controller *phwi_ctrlr, 3214 unsigned int def_pdu_ring_sz) 3215 { 3216 unsigned int idx; 3217 int ret; 3218 struct be_queue_info *dataq, *cq; 3219 struct be_dma_mem *mem; 3220 struct be_mem_descriptor *mem_descr; 3221 void *dq_vaddress; 3222 3223 idx = 0; 3224 dataq = &phwi_context->be_def_dataq; 3225 cq = &phwi_context->be_cq[0]; 3226 mem = &dataq->dma_mem; 3227 mem_descr = phba->init_mem; 3228 mem_descr += HWI_MEM_ASYNC_DATA_RING; 3229 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3230 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3231 sizeof(struct phys_addr), 3232 sizeof(struct phys_addr), dq_vaddress); 3233 if (ret) { 3234 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3235 "BM_%d : be_fill_queue Failed for DEF PDU DATA\n"); 3236 return ret; 3237 } 3238 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3239 bus_address.u.a64.address; 3240 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3241 def_pdu_ring_sz, 3242 phba->params.defpdu_data_sz); 3243 if (ret) { 3244 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3245 "BM_%d be_cmd_create_default_pdu_queue" 3246 " Failed for DEF PDU DATA\n"); 3247 return ret; 3248 } 3249 phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id; 3250 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3251 "BM_%d : iscsi def data id is %d\n", 3252 phwi_context->be_def_dataq.id); 3253 3254 hwi_post_async_buffers(phba, 0); 3255 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3256 "BM_%d : DEFAULT PDU DATA RING CREATED\n"); 3257 3258 return 0; 3259 } 3260 3261 static int 3262 beiscsi_post_pages(struct beiscsi_hba *phba) 3263 { 3264 struct be_mem_descriptor *mem_descr; 3265 struct mem_array *pm_arr; 3266 unsigned int page_offset, i; 3267 struct be_dma_mem sgl; 3268 int status; 3269 3270 mem_descr = phba->init_mem; 3271 mem_descr += HWI_MEM_SGE; 3272 pm_arr = mem_descr->mem_array; 3273 3274 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3275 phba->fw_config.iscsi_icd_start) / PAGE_SIZE; 3276 for (i = 0; i < mem_descr->num_elements; i++) { 3277 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3278 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3279 page_offset, 3280 (pm_arr->size / PAGE_SIZE)); 3281 page_offset += pm_arr->size / PAGE_SIZE; 3282 if (status != 0) { 3283 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3284 "BM_%d : post sgl failed.\n"); 3285 return status; 3286 } 3287 pm_arr++; 3288 } 3289 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3290 "BM_%d : POSTED PAGES\n"); 3291 return 0; 3292 } 3293 3294 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3295 { 3296 struct be_dma_mem *mem = &q->dma_mem; 3297 if (mem->va) { 3298 pci_free_consistent(phba->pcidev, mem->size, 3299 mem->va, mem->dma); 3300 mem->va = NULL; 3301 } 3302 } 3303 3304 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3305 u16 len, u16 entry_size) 3306 { 3307 struct be_dma_mem *mem = &q->dma_mem; 3308 3309 memset(q, 0, sizeof(*q)); 3310 q->len = len; 3311 q->entry_size = entry_size; 3312 mem->size = len * entry_size; 3313 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma); 3314 if (!mem->va) 3315 return -ENOMEM; 3316 memset(mem->va, 0, mem->size); 3317 return 0; 3318 } 3319 3320 static int 3321 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3322 struct hwi_context_memory *phwi_context, 3323 struct hwi_controller *phwi_ctrlr) 3324 { 3325 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 3326 u64 pa_addr_lo; 3327 unsigned int idx, num, i; 3328 struct mem_array *pwrb_arr; 3329 void *wrb_vaddr; 3330 struct be_dma_mem sgl; 3331 struct be_mem_descriptor *mem_descr; 3332 struct hwi_wrb_context *pwrb_context; 3333 int status; 3334 3335 idx = 0; 3336 mem_descr = phba->init_mem; 3337 mem_descr += HWI_MEM_WRB; 3338 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl, 3339 GFP_KERNEL); 3340 if (!pwrb_arr) { 3341 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3342 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3343 return -ENOMEM; 3344 } 3345 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3346 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3347 num_wrb_rings = mem_descr->mem_array[idx].size / 3348 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3349 3350 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3351 if (num_wrb_rings) { 3352 pwrb_arr[num].virtual_address = wrb_vaddr; 3353 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3354 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3355 sizeof(struct iscsi_wrb); 3356 wrb_vaddr += pwrb_arr[num].size; 3357 pa_addr_lo += pwrb_arr[num].size; 3358 num_wrb_rings--; 3359 } else { 3360 idx++; 3361 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3362 pa_addr_lo = mem_descr->mem_array[idx].\ 3363 bus_address.u.a64.address; 3364 num_wrb_rings = mem_descr->mem_array[idx].size / 3365 (phba->params.wrbs_per_cxn * 3366 sizeof(struct iscsi_wrb)); 3367 pwrb_arr[num].virtual_address = wrb_vaddr; 3368 pwrb_arr[num].bus_address.u.a64.address\ 3369 = pa_addr_lo; 3370 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3371 sizeof(struct iscsi_wrb); 3372 wrb_vaddr += pwrb_arr[num].size; 3373 pa_addr_lo += pwrb_arr[num].size; 3374 num_wrb_rings--; 3375 } 3376 } 3377 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3378 wrb_mem_index = 0; 3379 offset = 0; 3380 size = 0; 3381 3382 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3383 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3384 &phwi_context->be_wrbq[i]); 3385 if (status != 0) { 3386 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3387 "BM_%d : wrbq create failed."); 3388 kfree(pwrb_arr); 3389 return status; 3390 } 3391 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3392 pwrb_context->cid = phwi_context->be_wrbq[i].id; 3393 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3394 } 3395 kfree(pwrb_arr); 3396 return 0; 3397 } 3398 3399 static void free_wrb_handles(struct beiscsi_hba *phba) 3400 { 3401 unsigned int index; 3402 struct hwi_controller *phwi_ctrlr; 3403 struct hwi_wrb_context *pwrb_context; 3404 3405 phwi_ctrlr = phba->phwi_ctrlr; 3406 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 3407 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3408 kfree(pwrb_context->pwrb_handle_base); 3409 kfree(pwrb_context->pwrb_handle_basestd); 3410 } 3411 } 3412 3413 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3414 { 3415 struct be_queue_info *q; 3416 struct be_ctrl_info *ctrl = &phba->ctrl; 3417 3418 q = &phba->ctrl.mcc_obj.q; 3419 if (q->created) 3420 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3421 be_queue_free(phba, q); 3422 3423 q = &phba->ctrl.mcc_obj.cq; 3424 if (q->created) 3425 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3426 be_queue_free(phba, q); 3427 } 3428 3429 static void hwi_cleanup(struct beiscsi_hba *phba) 3430 { 3431 struct be_queue_info *q; 3432 struct be_ctrl_info *ctrl = &phba->ctrl; 3433 struct hwi_controller *phwi_ctrlr; 3434 struct hwi_context_memory *phwi_context; 3435 struct hwi_async_pdu_context *pasync_ctx; 3436 int i, eq_num; 3437 3438 phwi_ctrlr = phba->phwi_ctrlr; 3439 phwi_context = phwi_ctrlr->phwi_ctxt; 3440 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3441 q = &phwi_context->be_wrbq[i]; 3442 if (q->created) 3443 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3444 } 3445 kfree(phwi_context->be_wrbq); 3446 free_wrb_handles(phba); 3447 3448 q = &phwi_context->be_def_hdrq; 3449 if (q->created) 3450 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3451 3452 q = &phwi_context->be_def_dataq; 3453 if (q->created) 3454 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3455 3456 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3457 3458 for (i = 0; i < (phba->num_cpus); i++) { 3459 q = &phwi_context->be_cq[i]; 3460 if (q->created) 3461 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3462 } 3463 if (phba->msix_enabled) 3464 eq_num = 1; 3465 else 3466 eq_num = 0; 3467 for (i = 0; i < (phba->num_cpus + eq_num); i++) { 3468 q = &phwi_context->be_eq[i].q; 3469 if (q->created) 3470 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3471 } 3472 be_mcc_queues_destroy(phba); 3473 3474 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; 3475 kfree(pasync_ctx->async_entry); 3476 be_cmd_fw_uninit(ctrl); 3477 } 3478 3479 static int be_mcc_queues_create(struct beiscsi_hba *phba, 3480 struct hwi_context_memory *phwi_context) 3481 { 3482 struct be_queue_info *q, *cq; 3483 struct be_ctrl_info *ctrl = &phba->ctrl; 3484 3485 /* Alloc MCC compl queue */ 3486 cq = &phba->ctrl.mcc_obj.cq; 3487 if (be_queue_alloc(phba, cq, MCC_CQ_LEN, 3488 sizeof(struct be_mcc_compl))) 3489 goto err; 3490 /* Ask BE to create MCC compl queue; */ 3491 if (phba->msix_enabled) { 3492 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq 3493 [phba->num_cpus].q, false, true, 0)) 3494 goto mcc_cq_free; 3495 } else { 3496 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3497 false, true, 0)) 3498 goto mcc_cq_free; 3499 } 3500 3501 /* Alloc MCC queue */ 3502 q = &phba->ctrl.mcc_obj.q; 3503 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) 3504 goto mcc_cq_destroy; 3505 3506 /* Ask BE to create MCC queue */ 3507 if (beiscsi_cmd_mccq_create(phba, q, cq)) 3508 goto mcc_q_free; 3509 3510 return 0; 3511 3512 mcc_q_free: 3513 be_queue_free(phba, q); 3514 mcc_cq_destroy: 3515 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); 3516 mcc_cq_free: 3517 be_queue_free(phba, cq); 3518 err: 3519 return -ENOMEM; 3520 } 3521 3522 /** 3523 * find_num_cpus()- Get the CPU online count 3524 * @phba: ptr to priv structure 3525 * 3526 * CPU count is used for creating EQ. 3527 **/ 3528 static void find_num_cpus(struct beiscsi_hba *phba) 3529 { 3530 int num_cpus = 0; 3531 3532 num_cpus = num_online_cpus(); 3533 3534 switch (phba->generation) { 3535 case BE_GEN2: 3536 case BE_GEN3: 3537 phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ? 3538 BEISCSI_MAX_NUM_CPUS : num_cpus; 3539 break; 3540 case BE_GEN4: 3541 phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ? 3542 OC_SKH_MAX_NUM_CPUS : num_cpus; 3543 break; 3544 default: 3545 phba->num_cpus = 1; 3546 } 3547 } 3548 3549 static int hwi_init_port(struct beiscsi_hba *phba) 3550 { 3551 struct hwi_controller *phwi_ctrlr; 3552 struct hwi_context_memory *phwi_context; 3553 unsigned int def_pdu_ring_sz; 3554 struct be_ctrl_info *ctrl = &phba->ctrl; 3555 int status; 3556 3557 def_pdu_ring_sz = 3558 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr); 3559 phwi_ctrlr = phba->phwi_ctrlr; 3560 phwi_context = phwi_ctrlr->phwi_ctxt; 3561 phwi_context->max_eqd = 0; 3562 phwi_context->min_eqd = 0; 3563 phwi_context->cur_eqd = 64; 3564 be_cmd_fw_initialize(&phba->ctrl); 3565 3566 status = beiscsi_create_eqs(phba, phwi_context); 3567 if (status != 0) { 3568 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3569 "BM_%d : EQ not created\n"); 3570 goto error; 3571 } 3572 3573 status = be_mcc_queues_create(phba, phwi_context); 3574 if (status != 0) 3575 goto error; 3576 3577 status = mgmt_check_supported_fw(ctrl, phba); 3578 if (status != 0) { 3579 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3580 "BM_%d : Unsupported fw version\n"); 3581 goto error; 3582 } 3583 3584 status = beiscsi_create_cqs(phba, phwi_context); 3585 if (status != 0) { 3586 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3587 "BM_%d : CQ not created\n"); 3588 goto error; 3589 } 3590 3591 status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr, 3592 def_pdu_ring_sz); 3593 if (status != 0) { 3594 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3595 "BM_%d : Default Header not created\n"); 3596 goto error; 3597 } 3598 3599 status = beiscsi_create_def_data(phba, phwi_context, 3600 phwi_ctrlr, def_pdu_ring_sz); 3601 if (status != 0) { 3602 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3603 "BM_%d : Default Data not created\n"); 3604 goto error; 3605 } 3606 3607 status = beiscsi_post_pages(phba); 3608 if (status != 0) { 3609 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3610 "BM_%d : Post SGL Pages Failed\n"); 3611 goto error; 3612 } 3613 3614 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3615 if (status != 0) { 3616 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3617 "BM_%d : WRB Rings not created\n"); 3618 goto error; 3619 } 3620 3621 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3622 "BM_%d : hwi_init_port success\n"); 3623 return 0; 3624 3625 error: 3626 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3627 "BM_%d : hwi_init_port failed"); 3628 hwi_cleanup(phba); 3629 return status; 3630 } 3631 3632 static int hwi_init_controller(struct beiscsi_hba *phba) 3633 { 3634 struct hwi_controller *phwi_ctrlr; 3635 3636 phwi_ctrlr = phba->phwi_ctrlr; 3637 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3638 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3639 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3640 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3641 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3642 phwi_ctrlr->phwi_ctxt); 3643 } else { 3644 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3645 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3646 "than one element.Failing to load\n"); 3647 return -ENOMEM; 3648 } 3649 3650 iscsi_init_global_templates(phba); 3651 if (beiscsi_init_wrb_handle(phba)) 3652 return -ENOMEM; 3653 3654 if (hwi_init_async_pdu_ctx(phba)) { 3655 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3656 "BM_%d : hwi_init_async_pdu_ctx failed\n"); 3657 return -ENOMEM; 3658 } 3659 3660 if (hwi_init_port(phba) != 0) { 3661 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3662 "BM_%d : hwi_init_controller failed\n"); 3663 3664 return -ENOMEM; 3665 } 3666 return 0; 3667 } 3668 3669 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3670 { 3671 struct be_mem_descriptor *mem_descr; 3672 int i, j; 3673 3674 mem_descr = phba->init_mem; 3675 i = 0; 3676 j = 0; 3677 for (i = 0; i < SE_MEM_MAX; i++) { 3678 for (j = mem_descr->num_elements; j > 0; j--) { 3679 pci_free_consistent(phba->pcidev, 3680 mem_descr->mem_array[j - 1].size, 3681 mem_descr->mem_array[j - 1].virtual_address, 3682 (unsigned long)mem_descr->mem_array[j - 1]. 3683 bus_address.u.a64.address); 3684 } 3685 kfree(mem_descr->mem_array); 3686 mem_descr++; 3687 } 3688 kfree(phba->init_mem); 3689 kfree(phba->phwi_ctrlr->wrb_context); 3690 kfree(phba->phwi_ctrlr); 3691 } 3692 3693 static int beiscsi_init_controller(struct beiscsi_hba *phba) 3694 { 3695 int ret = -ENOMEM; 3696 3697 ret = beiscsi_get_memory(phba); 3698 if (ret < 0) { 3699 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3700 "BM_%d : beiscsi_dev_probe -" 3701 "Failed in beiscsi_alloc_memory\n"); 3702 return ret; 3703 } 3704 3705 ret = hwi_init_controller(phba); 3706 if (ret) 3707 goto free_init; 3708 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3709 "BM_%d : Return success from beiscsi_init_controller"); 3710 3711 return 0; 3712 3713 free_init: 3714 beiscsi_free_mem(phba); 3715 return ret; 3716 } 3717 3718 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 3719 { 3720 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 3721 struct sgl_handle *psgl_handle; 3722 struct iscsi_sge *pfrag; 3723 unsigned int arr_index, i, idx; 3724 3725 phba->io_sgl_hndl_avbl = 0; 3726 phba->eh_sgl_hndl_avbl = 0; 3727 3728 mem_descr_sglh = phba->init_mem; 3729 mem_descr_sglh += HWI_MEM_SGLH; 3730 if (1 == mem_descr_sglh->num_elements) { 3731 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3732 phba->params.ios_per_ctrl, 3733 GFP_KERNEL); 3734 if (!phba->io_sgl_hndl_base) { 3735 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3736 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3737 return -ENOMEM; 3738 } 3739 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 3740 (phba->params.icds_per_ctrl - 3741 phba->params.ios_per_ctrl), 3742 GFP_KERNEL); 3743 if (!phba->eh_sgl_hndl_base) { 3744 kfree(phba->io_sgl_hndl_base); 3745 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3746 "BM_%d : Mem Alloc Failed. Failing to load\n"); 3747 return -ENOMEM; 3748 } 3749 } else { 3750 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3751 "BM_%d : HWI_MEM_SGLH is more than one element." 3752 "Failing to load\n"); 3753 return -ENOMEM; 3754 } 3755 3756 arr_index = 0; 3757 idx = 0; 3758 while (idx < mem_descr_sglh->num_elements) { 3759 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 3760 3761 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 3762 sizeof(struct sgl_handle)); i++) { 3763 if (arr_index < phba->params.ios_per_ctrl) { 3764 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 3765 phba->io_sgl_hndl_avbl++; 3766 arr_index++; 3767 } else { 3768 phba->eh_sgl_hndl_base[arr_index - 3769 phba->params.ios_per_ctrl] = 3770 psgl_handle; 3771 arr_index++; 3772 phba->eh_sgl_hndl_avbl++; 3773 } 3774 psgl_handle++; 3775 } 3776 idx++; 3777 } 3778 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3779 "BM_%d : phba->io_sgl_hndl_avbl=%d" 3780 "phba->eh_sgl_hndl_avbl=%d\n", 3781 phba->io_sgl_hndl_avbl, 3782 phba->eh_sgl_hndl_avbl); 3783 3784 mem_descr_sg = phba->init_mem; 3785 mem_descr_sg += HWI_MEM_SGE; 3786 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3787 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 3788 mem_descr_sg->num_elements); 3789 3790 arr_index = 0; 3791 idx = 0; 3792 while (idx < mem_descr_sg->num_elements) { 3793 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 3794 3795 for (i = 0; 3796 i < (mem_descr_sg->mem_array[idx].size) / 3797 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 3798 i++) { 3799 if (arr_index < phba->params.ios_per_ctrl) 3800 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 3801 else 3802 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 3803 phba->params.ios_per_ctrl]; 3804 psgl_handle->pfrag = pfrag; 3805 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 3806 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 3807 pfrag += phba->params.num_sge_per_io; 3808 psgl_handle->sgl_index = 3809 phba->fw_config.iscsi_icd_start + arr_index++; 3810 } 3811 idx++; 3812 } 3813 phba->io_sgl_free_index = 0; 3814 phba->io_sgl_alloc_index = 0; 3815 phba->eh_sgl_free_index = 0; 3816 phba->eh_sgl_alloc_index = 0; 3817 return 0; 3818 } 3819 3820 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 3821 { 3822 int i; 3823 3824 phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, 3825 GFP_KERNEL); 3826 if (!phba->cid_array) { 3827 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3828 "BM_%d : Failed to allocate memory in " 3829 "hba_setup_cid_tbls\n"); 3830 return -ENOMEM; 3831 } 3832 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 3833 phba->params.cxns_per_ctrl, GFP_KERNEL); 3834 if (!phba->ep_array) { 3835 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3836 "BM_%d : Failed to allocate memory in " 3837 "hba_setup_cid_tbls\n"); 3838 kfree(phba->cid_array); 3839 phba->cid_array = NULL; 3840 return -ENOMEM; 3841 } 3842 3843 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * 3844 phba->params.cxns_per_ctrl, GFP_KERNEL); 3845 if (!phba->conn_table) { 3846 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3847 "BM_%d : Failed to allocate memory in" 3848 "hba_setup_cid_tbls\n"); 3849 3850 kfree(phba->cid_array); 3851 kfree(phba->ep_array); 3852 phba->cid_array = NULL; 3853 phba->ep_array = NULL; 3854 return -ENOMEM; 3855 } 3856 3857 for (i = 0; i < phba->params.cxns_per_ctrl; i++) 3858 phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid; 3859 3860 phba->avlbl_cids = phba->params.cxns_per_ctrl; 3861 return 0; 3862 } 3863 3864 static void hwi_enable_intr(struct beiscsi_hba *phba) 3865 { 3866 struct be_ctrl_info *ctrl = &phba->ctrl; 3867 struct hwi_controller *phwi_ctrlr; 3868 struct hwi_context_memory *phwi_context; 3869 struct be_queue_info *eq; 3870 u8 __iomem *addr; 3871 u32 reg, i; 3872 u32 enabled; 3873 3874 phwi_ctrlr = phba->phwi_ctrlr; 3875 phwi_context = phwi_ctrlr->phwi_ctxt; 3876 3877 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 3878 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 3879 reg = ioread32(addr); 3880 3881 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3882 if (!enabled) { 3883 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3884 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3885 "BM_%d : reg =x%08x addr=%p\n", reg, addr); 3886 iowrite32(reg, addr); 3887 } 3888 3889 if (!phba->msix_enabled) { 3890 eq = &phwi_context->be_eq[0].q; 3891 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3892 "BM_%d : eq->id=%d\n", eq->id); 3893 3894 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3895 } else { 3896 for (i = 0; i <= phba->num_cpus; i++) { 3897 eq = &phwi_context->be_eq[i].q; 3898 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3899 "BM_%d : eq->id=%d\n", eq->id); 3900 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 3901 } 3902 } 3903 } 3904 3905 static void hwi_disable_intr(struct beiscsi_hba *phba) 3906 { 3907 struct be_ctrl_info *ctrl = &phba->ctrl; 3908 3909 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 3910 u32 reg = ioread32(addr); 3911 3912 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3913 if (enabled) { 3914 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 3915 iowrite32(reg, addr); 3916 } else 3917 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 3918 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 3919 } 3920 3921 /** 3922 * beiscsi_get_boot_info()- Get the boot session info 3923 * @phba: The device priv structure instance 3924 * 3925 * Get the boot target info and store in driver priv structure 3926 * 3927 * return values 3928 * Success: 0 3929 * Failure: Non-Zero Value 3930 **/ 3931 static int beiscsi_get_boot_info(struct beiscsi_hba *phba) 3932 { 3933 struct be_cmd_get_session_resp *session_resp; 3934 struct be_dma_mem nonemb_cmd; 3935 unsigned int tag; 3936 unsigned int s_handle; 3937 int ret = -ENOMEM; 3938 3939 /* Get the session handle of the boot target */ 3940 ret = be_mgmt_get_boot_shandle(phba, &s_handle); 3941 if (ret) { 3942 beiscsi_log(phba, KERN_ERR, 3943 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 3944 "BM_%d : No boot session\n"); 3945 return ret; 3946 } 3947 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 3948 sizeof(*session_resp), 3949 &nonemb_cmd.dma); 3950 if (nonemb_cmd.va == NULL) { 3951 beiscsi_log(phba, KERN_ERR, 3952 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 3953 "BM_%d : Failed to allocate memory for" 3954 "beiscsi_get_session_info\n"); 3955 3956 return -ENOMEM; 3957 } 3958 3959 memset(nonemb_cmd.va, 0, sizeof(*session_resp)); 3960 tag = mgmt_get_session_info(phba, s_handle, 3961 &nonemb_cmd); 3962 if (!tag) { 3963 beiscsi_log(phba, KERN_ERR, 3964 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 3965 "BM_%d : beiscsi_get_session_info" 3966 " Failed\n"); 3967 3968 goto boot_freemem; 3969 } 3970 3971 ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); 3972 if (ret) { 3973 beiscsi_log(phba, KERN_ERR, 3974 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 3975 "BM_%d : beiscsi_get_session_info Failed"); 3976 goto boot_freemem; 3977 } 3978 3979 session_resp = nonemb_cmd.va ; 3980 3981 memcpy(&phba->boot_sess, &session_resp->session_info, 3982 sizeof(struct mgmt_session_info)); 3983 ret = 0; 3984 3985 boot_freemem: 3986 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 3987 nonemb_cmd.va, nonemb_cmd.dma); 3988 return ret; 3989 } 3990 3991 static void beiscsi_boot_release(void *data) 3992 { 3993 struct beiscsi_hba *phba = data; 3994 3995 scsi_host_put(phba->shost); 3996 } 3997 3998 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) 3999 { 4000 struct iscsi_boot_kobj *boot_kobj; 4001 4002 /* get boot info using mgmt cmd */ 4003 if (beiscsi_get_boot_info(phba)) 4004 /* Try to see if we can carry on without this */ 4005 return 0; 4006 4007 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 4008 if (!phba->boot_kset) 4009 return -ENOMEM; 4010 4011 /* get a ref because the show function will ref the phba */ 4012 if (!scsi_host_get(phba->shost)) 4013 goto free_kset; 4014 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba, 4015 beiscsi_show_boot_tgt_info, 4016 beiscsi_tgt_get_attr_visibility, 4017 beiscsi_boot_release); 4018 if (!boot_kobj) 4019 goto put_shost; 4020 4021 if (!scsi_host_get(phba->shost)) 4022 goto free_kset; 4023 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba, 4024 beiscsi_show_boot_ini_info, 4025 beiscsi_ini_get_attr_visibility, 4026 beiscsi_boot_release); 4027 if (!boot_kobj) 4028 goto put_shost; 4029 4030 if (!scsi_host_get(phba->shost)) 4031 goto free_kset; 4032 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba, 4033 beiscsi_show_boot_eth_info, 4034 beiscsi_eth_get_attr_visibility, 4035 beiscsi_boot_release); 4036 if (!boot_kobj) 4037 goto put_shost; 4038 return 0; 4039 4040 put_shost: 4041 scsi_host_put(phba->shost); 4042 free_kset: 4043 iscsi_boot_destroy_kset(phba->boot_kset); 4044 return -ENOMEM; 4045 } 4046 4047 static int beiscsi_init_port(struct beiscsi_hba *phba) 4048 { 4049 int ret; 4050 4051 ret = beiscsi_init_controller(phba); 4052 if (ret < 0) { 4053 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4054 "BM_%d : beiscsi_dev_probe - Failed in" 4055 "beiscsi_init_controller\n"); 4056 return ret; 4057 } 4058 ret = beiscsi_init_sgl_handle(phba); 4059 if (ret < 0) { 4060 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4061 "BM_%d : beiscsi_dev_probe - Failed in" 4062 "beiscsi_init_sgl_handle\n"); 4063 goto do_cleanup_ctrlr; 4064 } 4065 4066 if (hba_setup_cid_tbls(phba)) { 4067 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4068 "BM_%d : Failed in hba_setup_cid_tbls\n"); 4069 kfree(phba->io_sgl_hndl_base); 4070 kfree(phba->eh_sgl_hndl_base); 4071 goto do_cleanup_ctrlr; 4072 } 4073 4074 return ret; 4075 4076 do_cleanup_ctrlr: 4077 hwi_cleanup(phba); 4078 return ret; 4079 } 4080 4081 static void hwi_purge_eq(struct beiscsi_hba *phba) 4082 { 4083 struct hwi_controller *phwi_ctrlr; 4084 struct hwi_context_memory *phwi_context; 4085 struct be_queue_info *eq; 4086 struct be_eq_entry *eqe = NULL; 4087 int i, eq_msix; 4088 unsigned int num_processed; 4089 4090 phwi_ctrlr = phba->phwi_ctrlr; 4091 phwi_context = phwi_ctrlr->phwi_ctxt; 4092 if (phba->msix_enabled) 4093 eq_msix = 1; 4094 else 4095 eq_msix = 0; 4096 4097 for (i = 0; i < (phba->num_cpus + eq_msix); i++) { 4098 eq = &phwi_context->be_eq[i].q; 4099 eqe = queue_tail_node(eq); 4100 num_processed = 0; 4101 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 4102 & EQE_VALID_MASK) { 4103 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 4104 queue_tail_inc(eq); 4105 eqe = queue_tail_node(eq); 4106 num_processed++; 4107 } 4108 4109 if (num_processed) 4110 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); 4111 } 4112 } 4113 4114 static void beiscsi_clean_port(struct beiscsi_hba *phba) 4115 { 4116 int mgmt_status; 4117 4118 mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0); 4119 if (mgmt_status) 4120 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4121 "BM_%d : mgmt_epfw_cleanup FAILED\n"); 4122 4123 hwi_purge_eq(phba); 4124 hwi_cleanup(phba); 4125 kfree(phba->io_sgl_hndl_base); 4126 kfree(phba->eh_sgl_hndl_base); 4127 kfree(phba->cid_array); 4128 kfree(phba->ep_array); 4129 kfree(phba->conn_table); 4130 } 4131 4132 /** 4133 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources 4134 * @beiscsi_conn: ptr to the conn to be cleaned up 4135 * @task: ptr to iscsi_task resource to be freed. 4136 * 4137 * Free driver mgmt resources binded to CXN. 4138 **/ 4139 void 4140 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4141 struct iscsi_task *task) 4142 { 4143 struct beiscsi_io_task *io_task; 4144 struct beiscsi_hba *phba = beiscsi_conn->phba; 4145 struct hwi_wrb_context *pwrb_context; 4146 struct hwi_controller *phwi_ctrlr; 4147 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4148 beiscsi_conn->beiscsi_conn_cid); 4149 4150 phwi_ctrlr = phba->phwi_ctrlr; 4151 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4152 4153 io_task = task->dd_data; 4154 4155 if (io_task->pwrb_handle) { 4156 memset(io_task->pwrb_handle->pwrb, 0, 4157 sizeof(struct iscsi_wrb)); 4158 free_wrb_handle(phba, pwrb_context, 4159 io_task->pwrb_handle); 4160 io_task->pwrb_handle = NULL; 4161 } 4162 4163 if (io_task->psgl_handle) { 4164 spin_lock_bh(&phba->mgmt_sgl_lock); 4165 free_mgmt_sgl_handle(phba, 4166 io_task->psgl_handle); 4167 io_task->psgl_handle = NULL; 4168 spin_unlock_bh(&phba->mgmt_sgl_lock); 4169 } 4170 4171 if (io_task->mtask_addr) 4172 pci_unmap_single(phba->pcidev, 4173 io_task->mtask_addr, 4174 io_task->mtask_data_count, 4175 PCI_DMA_TODEVICE); 4176 } 4177 4178 /** 4179 * beiscsi_cleanup_task()- Free driver resources of the task 4180 * @task: ptr to the iscsi task 4181 * 4182 **/ 4183 static void beiscsi_cleanup_task(struct iscsi_task *task) 4184 { 4185 struct beiscsi_io_task *io_task = task->dd_data; 4186 struct iscsi_conn *conn = task->conn; 4187 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4188 struct beiscsi_hba *phba = beiscsi_conn->phba; 4189 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4190 struct hwi_wrb_context *pwrb_context; 4191 struct hwi_controller *phwi_ctrlr; 4192 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4193 beiscsi_conn->beiscsi_conn_cid); 4194 4195 phwi_ctrlr = phba->phwi_ctrlr; 4196 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4197 4198 if (io_task->cmd_bhs) { 4199 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4200 io_task->bhs_pa.u.a64.address); 4201 io_task->cmd_bhs = NULL; 4202 } 4203 4204 if (task->sc) { 4205 if (io_task->pwrb_handle) { 4206 free_wrb_handle(phba, pwrb_context, 4207 io_task->pwrb_handle); 4208 io_task->pwrb_handle = NULL; 4209 } 4210 4211 if (io_task->psgl_handle) { 4212 spin_lock(&phba->io_sgl_lock); 4213 free_io_sgl_handle(phba, io_task->psgl_handle); 4214 spin_unlock(&phba->io_sgl_lock); 4215 io_task->psgl_handle = NULL; 4216 } 4217 } else { 4218 if (!beiscsi_conn->login_in_progress) 4219 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4220 } 4221 } 4222 4223 void 4224 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4225 struct beiscsi_offload_params *params) 4226 { 4227 struct wrb_handle *pwrb_handle; 4228 struct beiscsi_hba *phba = beiscsi_conn->phba; 4229 struct iscsi_task *task = beiscsi_conn->task; 4230 struct iscsi_session *session = task->conn->session; 4231 u32 doorbell = 0; 4232 4233 /* 4234 * We can always use 0 here because it is reserved by libiscsi for 4235 * login/startup related tasks. 4236 */ 4237 beiscsi_conn->login_in_progress = 0; 4238 spin_lock_bh(&session->lock); 4239 beiscsi_cleanup_task(task); 4240 spin_unlock_bh(&session->lock); 4241 4242 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid); 4243 4244 /* Check for the adapter family */ 4245 if (is_chip_be2_be3r(phba)) 4246 beiscsi_offload_cxn_v0(params, pwrb_handle, 4247 phba->init_mem); 4248 else 4249 beiscsi_offload_cxn_v2(params, pwrb_handle); 4250 4251 be_dws_le_to_cpu(pwrb_handle->pwrb, 4252 sizeof(struct iscsi_target_context_update_wrb)); 4253 4254 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4255 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4256 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4257 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4258 4259 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4260 } 4261 4262 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4263 int *index, int *age) 4264 { 4265 *index = (int)itt; 4266 if (age) 4267 *age = conn->session->age; 4268 } 4269 4270 /** 4271 * beiscsi_alloc_pdu - allocates pdu and related resources 4272 * @task: libiscsi task 4273 * @opcode: opcode of pdu for task 4274 * 4275 * This is called with the session lock held. It will allocate 4276 * the wrb and sgl if needed for the command. And it will prep 4277 * the pdu's itt. beiscsi_parse_pdu will later translate 4278 * the pdu itt to the libiscsi task itt. 4279 */ 4280 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4281 { 4282 struct beiscsi_io_task *io_task = task->dd_data; 4283 struct iscsi_conn *conn = task->conn; 4284 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4285 struct beiscsi_hba *phba = beiscsi_conn->phba; 4286 struct hwi_wrb_context *pwrb_context; 4287 struct hwi_controller *phwi_ctrlr; 4288 itt_t itt; 4289 uint16_t cri_index = 0; 4290 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4291 dma_addr_t paddr; 4292 4293 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, 4294 GFP_ATOMIC, &paddr); 4295 if (!io_task->cmd_bhs) 4296 return -ENOMEM; 4297 io_task->bhs_pa.u.a64.address = paddr; 4298 io_task->libiscsi_itt = (itt_t)task->itt; 4299 io_task->conn = beiscsi_conn; 4300 4301 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4302 task->hdr_max = sizeof(struct be_cmd_bhs); 4303 io_task->psgl_handle = NULL; 4304 io_task->pwrb_handle = NULL; 4305 4306 if (task->sc) { 4307 spin_lock(&phba->io_sgl_lock); 4308 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4309 spin_unlock(&phba->io_sgl_lock); 4310 if (!io_task->psgl_handle) { 4311 beiscsi_log(phba, KERN_ERR, 4312 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4313 "BM_%d : Alloc of IO_SGL_ICD Failed" 4314 "for the CID : %d\n", 4315 beiscsi_conn->beiscsi_conn_cid); 4316 goto free_hndls; 4317 } 4318 io_task->pwrb_handle = alloc_wrb_handle(phba, 4319 beiscsi_conn->beiscsi_conn_cid); 4320 if (!io_task->pwrb_handle) { 4321 beiscsi_log(phba, KERN_ERR, 4322 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4323 "BM_%d : Alloc of WRB_HANDLE Failed" 4324 "for the CID : %d\n", 4325 beiscsi_conn->beiscsi_conn_cid); 4326 goto free_io_hndls; 4327 } 4328 } else { 4329 io_task->scsi_cmnd = NULL; 4330 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4331 beiscsi_conn->task = task; 4332 if (!beiscsi_conn->login_in_progress) { 4333 spin_lock(&phba->mgmt_sgl_lock); 4334 io_task->psgl_handle = (struct sgl_handle *) 4335 alloc_mgmt_sgl_handle(phba); 4336 spin_unlock(&phba->mgmt_sgl_lock); 4337 if (!io_task->psgl_handle) { 4338 beiscsi_log(phba, KERN_ERR, 4339 BEISCSI_LOG_IO | 4340 BEISCSI_LOG_CONFIG, 4341 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4342 "for the CID : %d\n", 4343 beiscsi_conn-> 4344 beiscsi_conn_cid); 4345 goto free_hndls; 4346 } 4347 4348 beiscsi_conn->login_in_progress = 1; 4349 beiscsi_conn->plogin_sgl_handle = 4350 io_task->psgl_handle; 4351 io_task->pwrb_handle = 4352 alloc_wrb_handle(phba, 4353 beiscsi_conn->beiscsi_conn_cid); 4354 if (!io_task->pwrb_handle) { 4355 beiscsi_log(phba, KERN_ERR, 4356 BEISCSI_LOG_IO | 4357 BEISCSI_LOG_CONFIG, 4358 "BM_%d : Alloc of WRB_HANDLE Failed" 4359 "for the CID : %d\n", 4360 beiscsi_conn-> 4361 beiscsi_conn_cid); 4362 goto free_mgmt_hndls; 4363 } 4364 beiscsi_conn->plogin_wrb_handle = 4365 io_task->pwrb_handle; 4366 4367 } else { 4368 io_task->psgl_handle = 4369 beiscsi_conn->plogin_sgl_handle; 4370 io_task->pwrb_handle = 4371 beiscsi_conn->plogin_wrb_handle; 4372 } 4373 } else { 4374 spin_lock(&phba->mgmt_sgl_lock); 4375 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4376 spin_unlock(&phba->mgmt_sgl_lock); 4377 if (!io_task->psgl_handle) { 4378 beiscsi_log(phba, KERN_ERR, 4379 BEISCSI_LOG_IO | 4380 BEISCSI_LOG_CONFIG, 4381 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4382 "for the CID : %d\n", 4383 beiscsi_conn-> 4384 beiscsi_conn_cid); 4385 goto free_hndls; 4386 } 4387 io_task->pwrb_handle = 4388 alloc_wrb_handle(phba, 4389 beiscsi_conn->beiscsi_conn_cid); 4390 if (!io_task->pwrb_handle) { 4391 beiscsi_log(phba, KERN_ERR, 4392 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4393 "BM_%d : Alloc of WRB_HANDLE Failed" 4394 "for the CID : %d\n", 4395 beiscsi_conn->beiscsi_conn_cid); 4396 goto free_mgmt_hndls; 4397 } 4398 4399 } 4400 } 4401 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4402 wrb_index << 16) | (unsigned int) 4403 (io_task->psgl_handle->sgl_index)); 4404 io_task->pwrb_handle->pio_handle = task; 4405 4406 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4407 return 0; 4408 4409 free_io_hndls: 4410 spin_lock(&phba->io_sgl_lock); 4411 free_io_sgl_handle(phba, io_task->psgl_handle); 4412 spin_unlock(&phba->io_sgl_lock); 4413 goto free_hndls; 4414 free_mgmt_hndls: 4415 spin_lock(&phba->mgmt_sgl_lock); 4416 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4417 io_task->psgl_handle = NULL; 4418 spin_unlock(&phba->mgmt_sgl_lock); 4419 free_hndls: 4420 phwi_ctrlr = phba->phwi_ctrlr; 4421 cri_index = BE_GET_CRI_FROM_CID( 4422 beiscsi_conn->beiscsi_conn_cid); 4423 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4424 if (io_task->pwrb_handle) 4425 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4426 io_task->pwrb_handle = NULL; 4427 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4428 io_task->bhs_pa.u.a64.address); 4429 io_task->cmd_bhs = NULL; 4430 return -ENOMEM; 4431 } 4432 int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4433 unsigned int num_sg, unsigned int xferlen, 4434 unsigned int writedir) 4435 { 4436 4437 struct beiscsi_io_task *io_task = task->dd_data; 4438 struct iscsi_conn *conn = task->conn; 4439 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4440 struct beiscsi_hba *phba = beiscsi_conn->phba; 4441 struct iscsi_wrb *pwrb = NULL; 4442 unsigned int doorbell = 0; 4443 4444 pwrb = io_task->pwrb_handle->pwrb; 4445 4446 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4447 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4448 4449 if (writedir) { 4450 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4451 INI_WR_CMD); 4452 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4453 } else { 4454 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4455 INI_RD_CMD); 4456 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4457 } 4458 4459 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4460 type, pwrb); 4461 4462 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4463 cpu_to_be16(*(unsigned short *) 4464 &io_task->cmd_bhs->iscsi_hdr.lun)); 4465 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4466 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4467 io_task->pwrb_handle->wrb_index); 4468 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4469 be32_to_cpu(task->cmdsn)); 4470 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4471 io_task->psgl_handle->sgl_index); 4472 4473 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4474 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4475 io_task->pwrb_handle->nxt_wrb_index); 4476 4477 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4478 4479 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4480 doorbell |= (io_task->pwrb_handle->wrb_index & 4481 DB_DEF_PDU_WRB_INDEX_MASK) << 4482 DB_DEF_PDU_WRB_INDEX_SHIFT; 4483 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4484 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4485 return 0; 4486 } 4487 4488 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4489 unsigned int num_sg, unsigned int xferlen, 4490 unsigned int writedir) 4491 { 4492 4493 struct beiscsi_io_task *io_task = task->dd_data; 4494 struct iscsi_conn *conn = task->conn; 4495 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4496 struct beiscsi_hba *phba = beiscsi_conn->phba; 4497 struct iscsi_wrb *pwrb = NULL; 4498 unsigned int doorbell = 0; 4499 4500 pwrb = io_task->pwrb_handle->pwrb; 4501 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4502 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4503 4504 if (writedir) { 4505 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4506 INI_WR_CMD); 4507 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4508 } else { 4509 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4510 INI_RD_CMD); 4511 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4512 } 4513 4514 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4515 type, pwrb); 4516 4517 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4518 cpu_to_be16(*(unsigned short *) 4519 &io_task->cmd_bhs->iscsi_hdr.lun)); 4520 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4521 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4522 io_task->pwrb_handle->wrb_index); 4523 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4524 be32_to_cpu(task->cmdsn)); 4525 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4526 io_task->psgl_handle->sgl_index); 4527 4528 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4529 4530 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4531 io_task->pwrb_handle->nxt_wrb_index); 4532 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4533 4534 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4535 doorbell |= (io_task->pwrb_handle->wrb_index & 4536 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4537 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4538 4539 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4540 return 0; 4541 } 4542 4543 static int beiscsi_mtask(struct iscsi_task *task) 4544 { 4545 struct beiscsi_io_task *io_task = task->dd_data; 4546 struct iscsi_conn *conn = task->conn; 4547 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4548 struct beiscsi_hba *phba = beiscsi_conn->phba; 4549 struct iscsi_wrb *pwrb = NULL; 4550 unsigned int doorbell = 0; 4551 unsigned int cid; 4552 unsigned int pwrb_typeoffset = 0; 4553 4554 cid = beiscsi_conn->beiscsi_conn_cid; 4555 pwrb = io_task->pwrb_handle->pwrb; 4556 memset(pwrb, 0, sizeof(*pwrb)); 4557 4558 if (is_chip_be2_be3r(phba)) { 4559 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4560 be32_to_cpu(task->cmdsn)); 4561 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4562 io_task->pwrb_handle->wrb_index); 4563 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4564 io_task->psgl_handle->sgl_index); 4565 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4566 task->data_count); 4567 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4568 io_task->pwrb_handle->nxt_wrb_index); 4569 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4570 } else { 4571 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4572 be32_to_cpu(task->cmdsn)); 4573 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4574 io_task->pwrb_handle->wrb_index); 4575 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4576 io_task->psgl_handle->sgl_index); 4577 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 4578 task->data_count); 4579 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4580 io_task->pwrb_handle->nxt_wrb_index); 4581 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 4582 } 4583 4584 4585 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 4586 case ISCSI_OP_LOGIN: 4587 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 4588 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4589 hwi_write_buffer(pwrb, task); 4590 break; 4591 case ISCSI_OP_NOOP_OUT: 4592 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 4593 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4594 if (is_chip_be2_be3r(phba)) 4595 AMAP_SET_BITS(struct amap_iscsi_wrb, 4596 dmsg, pwrb, 1); 4597 else 4598 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4599 dmsg, pwrb, 1); 4600 } else { 4601 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 4602 if (is_chip_be2_be3r(phba)) 4603 AMAP_SET_BITS(struct amap_iscsi_wrb, 4604 dmsg, pwrb, 0); 4605 else 4606 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 4607 dmsg, pwrb, 0); 4608 } 4609 hwi_write_buffer(pwrb, task); 4610 break; 4611 case ISCSI_OP_TEXT: 4612 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 4613 hwi_write_buffer(pwrb, task); 4614 break; 4615 case ISCSI_OP_SCSI_TMFUNC: 4616 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 4617 hwi_write_buffer(pwrb, task); 4618 break; 4619 case ISCSI_OP_LOGOUT: 4620 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 4621 hwi_write_buffer(pwrb, task); 4622 break; 4623 4624 default: 4625 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4626 "BM_%d : opcode =%d Not supported\n", 4627 task->hdr->opcode & ISCSI_OPCODE_MASK); 4628 4629 return -EINVAL; 4630 } 4631 4632 /* Set the task type */ 4633 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 4634 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 4635 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 4636 4637 doorbell |= cid & DB_WRB_POST_CID_MASK; 4638 doorbell |= (io_task->pwrb_handle->wrb_index & 4639 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4640 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4641 iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET); 4642 return 0; 4643 } 4644 4645 static int beiscsi_task_xmit(struct iscsi_task *task) 4646 { 4647 struct beiscsi_io_task *io_task = task->dd_data; 4648 struct scsi_cmnd *sc = task->sc; 4649 struct beiscsi_hba *phba = NULL; 4650 struct scatterlist *sg; 4651 int num_sg; 4652 unsigned int writedir = 0, xferlen = 0; 4653 4654 phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba; 4655 4656 if (!sc) 4657 return beiscsi_mtask(task); 4658 4659 io_task->scsi_cmnd = sc; 4660 num_sg = scsi_dma_map(sc); 4661 if (num_sg < 0) { 4662 struct iscsi_conn *conn = task->conn; 4663 struct beiscsi_hba *phba = NULL; 4664 4665 phba = ((struct beiscsi_conn *)conn->dd_data)->phba; 4666 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO, 4667 "BM_%d : scsi_dma_map Failed\n"); 4668 4669 return num_sg; 4670 } 4671 xferlen = scsi_bufflen(sc); 4672 sg = scsi_sglist(sc); 4673 if (sc->sc_data_direction == DMA_TO_DEVICE) 4674 writedir = 1; 4675 else 4676 writedir = 0; 4677 4678 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 4679 } 4680 4681 /** 4682 * beiscsi_bsg_request - handle bsg request from ISCSI transport 4683 * @job: job to handle 4684 */ 4685 static int beiscsi_bsg_request(struct bsg_job *job) 4686 { 4687 struct Scsi_Host *shost; 4688 struct beiscsi_hba *phba; 4689 struct iscsi_bsg_request *bsg_req = job->request; 4690 int rc = -EINVAL; 4691 unsigned int tag; 4692 struct be_dma_mem nonemb_cmd; 4693 struct be_cmd_resp_hdr *resp; 4694 struct iscsi_bsg_reply *bsg_reply = job->reply; 4695 unsigned short status, extd_status; 4696 4697 shost = iscsi_job_to_shost(job); 4698 phba = iscsi_host_priv(shost); 4699 4700 switch (bsg_req->msgcode) { 4701 case ISCSI_BSG_HST_VENDOR: 4702 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 4703 job->request_payload.payload_len, 4704 &nonemb_cmd.dma); 4705 if (nonemb_cmd.va == NULL) { 4706 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4707 "BM_%d : Failed to allocate memory for " 4708 "beiscsi_bsg_request\n"); 4709 return -ENOMEM; 4710 } 4711 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 4712 &nonemb_cmd); 4713 if (!tag) { 4714 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4715 "BM_%d : MBX Tag Allocation Failed\n"); 4716 4717 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4718 nonemb_cmd.va, nonemb_cmd.dma); 4719 return -EAGAIN; 4720 } 4721 4722 rc = wait_event_interruptible_timeout( 4723 phba->ctrl.mcc_wait[tag], 4724 phba->ctrl.mcc_numtag[tag], 4725 msecs_to_jiffies( 4726 BEISCSI_HOST_MBX_TIMEOUT)); 4727 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; 4728 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 4729 free_mcc_tag(&phba->ctrl, tag); 4730 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 4731 sg_copy_from_buffer(job->reply_payload.sg_list, 4732 job->reply_payload.sg_cnt, 4733 nonemb_cmd.va, (resp->response_length 4734 + sizeof(*resp))); 4735 bsg_reply->reply_payload_rcv_len = resp->response_length; 4736 bsg_reply->result = status; 4737 bsg_job_done(job, bsg_reply->result, 4738 bsg_reply->reply_payload_rcv_len); 4739 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4740 nonemb_cmd.va, nonemb_cmd.dma); 4741 if (status || extd_status) { 4742 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4743 "BM_%d : MBX Cmd Failed" 4744 " status = %d extd_status = %d\n", 4745 status, extd_status); 4746 4747 return -EIO; 4748 } else { 4749 rc = 0; 4750 } 4751 break; 4752 4753 default: 4754 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 4755 "BM_%d : Unsupported bsg command: 0x%x\n", 4756 bsg_req->msgcode); 4757 break; 4758 } 4759 4760 return rc; 4761 } 4762 4763 void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 4764 { 4765 /* Set the logging parameter */ 4766 beiscsi_log_enable_init(phba, beiscsi_log_enable); 4767 } 4768 4769 /* 4770 * beiscsi_quiesce()- Cleanup Driver resources 4771 * @phba: Instance Priv structure 4772 * 4773 * Free the OS and HW resources held by the driver 4774 **/ 4775 static void beiscsi_quiesce(struct beiscsi_hba *phba) 4776 { 4777 struct hwi_controller *phwi_ctrlr; 4778 struct hwi_context_memory *phwi_context; 4779 struct be_eq_obj *pbe_eq; 4780 unsigned int i, msix_vec; 4781 4782 phwi_ctrlr = phba->phwi_ctrlr; 4783 phwi_context = phwi_ctrlr->phwi_ctxt; 4784 hwi_disable_intr(phba); 4785 if (phba->msix_enabled) { 4786 for (i = 0; i <= phba->num_cpus; i++) { 4787 msix_vec = phba->msix_entries[i].vector; 4788 free_irq(msix_vec, &phwi_context->be_eq[i]); 4789 kfree(phba->msi_name[i]); 4790 } 4791 } else 4792 if (phba->pcidev->irq) 4793 free_irq(phba->pcidev->irq, phba); 4794 pci_disable_msix(phba->pcidev); 4795 destroy_workqueue(phba->wq); 4796 if (blk_iopoll_enabled) 4797 for (i = 0; i < phba->num_cpus; i++) { 4798 pbe_eq = &phwi_context->be_eq[i]; 4799 blk_iopoll_disable(&pbe_eq->iopoll); 4800 } 4801 4802 beiscsi_clean_port(phba); 4803 beiscsi_free_mem(phba); 4804 4805 beiscsi_unmap_pci_function(phba); 4806 pci_free_consistent(phba->pcidev, 4807 phba->ctrl.mbox_mem_alloced.size, 4808 phba->ctrl.mbox_mem_alloced.va, 4809 phba->ctrl.mbox_mem_alloced.dma); 4810 4811 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); 4812 } 4813 4814 static void beiscsi_remove(struct pci_dev *pcidev) 4815 { 4816 4817 struct beiscsi_hba *phba = NULL; 4818 4819 phba = pci_get_drvdata(pcidev); 4820 if (!phba) { 4821 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); 4822 return; 4823 } 4824 4825 beiscsi_destroy_def_ifaces(phba); 4826 beiscsi_quiesce(phba); 4827 iscsi_boot_destroy_kset(phba->boot_kset); 4828 iscsi_host_remove(phba->shost); 4829 pci_dev_put(phba->pcidev); 4830 iscsi_host_free(phba->shost); 4831 pci_disable_device(pcidev); 4832 } 4833 4834 static void beiscsi_shutdown(struct pci_dev *pcidev) 4835 { 4836 4837 struct beiscsi_hba *phba = NULL; 4838 4839 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); 4840 if (!phba) { 4841 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n"); 4842 return; 4843 } 4844 4845 beiscsi_quiesce(phba); 4846 pci_disable_device(pcidev); 4847 } 4848 4849 static void beiscsi_msix_enable(struct beiscsi_hba *phba) 4850 { 4851 int i, status; 4852 4853 for (i = 0; i <= phba->num_cpus; i++) 4854 phba->msix_entries[i].entry = i; 4855 4856 status = pci_enable_msix(phba->pcidev, phba->msix_entries, 4857 (phba->num_cpus + 1)); 4858 if (!status) 4859 phba->msix_enabled = true; 4860 4861 return; 4862 } 4863 4864 /* 4865 * beiscsi_hw_health_check()- Check adapter health 4866 * @work: work item to check HW health 4867 * 4868 * Check if adapter in an unrecoverable state or not. 4869 **/ 4870 static void 4871 beiscsi_hw_health_check(struct work_struct *work) 4872 { 4873 struct beiscsi_hba *phba = 4874 container_of(work, struct beiscsi_hba, 4875 beiscsi_hw_check_task.work); 4876 4877 beiscsi_ue_detect(phba); 4878 4879 schedule_delayed_work(&phba->beiscsi_hw_check_task, 4880 msecs_to_jiffies(1000)); 4881 } 4882 4883 static int beiscsi_dev_probe(struct pci_dev *pcidev, 4884 const struct pci_device_id *id) 4885 { 4886 struct beiscsi_hba *phba = NULL; 4887 struct hwi_controller *phwi_ctrlr; 4888 struct hwi_context_memory *phwi_context; 4889 struct be_eq_obj *pbe_eq; 4890 int ret, i; 4891 4892 ret = beiscsi_enable_pci(pcidev); 4893 if (ret < 0) { 4894 dev_err(&pcidev->dev, 4895 "beiscsi_dev_probe - Failed to enable pci device\n"); 4896 return ret; 4897 } 4898 4899 phba = beiscsi_hba_alloc(pcidev); 4900 if (!phba) { 4901 dev_err(&pcidev->dev, 4902 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 4903 goto disable_pci; 4904 } 4905 4906 /* Initialize Driver configuration Paramters */ 4907 beiscsi_hba_attrs_init(phba); 4908 4909 phba->fw_timeout = false; 4910 4911 4912 switch (pcidev->device) { 4913 case BE_DEVICE_ID1: 4914 case OC_DEVICE_ID1: 4915 case OC_DEVICE_ID2: 4916 phba->generation = BE_GEN2; 4917 phba->iotask_fn = beiscsi_iotask; 4918 break; 4919 case BE_DEVICE_ID2: 4920 case OC_DEVICE_ID3: 4921 phba->generation = BE_GEN3; 4922 phba->iotask_fn = beiscsi_iotask; 4923 break; 4924 case OC_SKH_ID1: 4925 phba->generation = BE_GEN4; 4926 phba->iotask_fn = beiscsi_iotask_v2; 4927 break; 4928 default: 4929 phba->generation = 0; 4930 } 4931 4932 if (enable_msix) 4933 find_num_cpus(phba); 4934 else 4935 phba->num_cpus = 1; 4936 4937 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4938 "BM_%d : num_cpus = %d\n", 4939 phba->num_cpus); 4940 4941 if (enable_msix) { 4942 beiscsi_msix_enable(phba); 4943 if (!phba->msix_enabled) 4944 phba->num_cpus = 1; 4945 } 4946 ret = be_ctrl_init(phba, pcidev); 4947 if (ret) { 4948 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4949 "BM_%d : beiscsi_dev_probe-" 4950 "Failed in be_ctrl_init\n"); 4951 goto hba_free; 4952 } 4953 4954 ret = beiscsi_cmd_reset_function(phba); 4955 if (ret) { 4956 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4957 "BM_%d : Reset Failed. Aborting Crashdump\n"); 4958 goto hba_free; 4959 } 4960 ret = be_chk_reset_complete(phba); 4961 if (ret) { 4962 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4963 "BM_%d : Failed to get out of reset." 4964 "Aborting Crashdump\n"); 4965 goto hba_free; 4966 } 4967 4968 spin_lock_init(&phba->io_sgl_lock); 4969 spin_lock_init(&phba->mgmt_sgl_lock); 4970 spin_lock_init(&phba->isr_lock); 4971 ret = mgmt_get_fw_config(&phba->ctrl, phba); 4972 if (ret != 0) { 4973 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4974 "BM_%d : Error getting fw config\n"); 4975 goto free_port; 4976 } 4977 phba->shost->max_id = phba->fw_config.iscsi_cid_count; 4978 beiscsi_get_params(phba); 4979 phba->shost->can_queue = phba->params.ios_per_ctrl; 4980 ret = beiscsi_init_port(phba); 4981 if (ret < 0) { 4982 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4983 "BM_%d : beiscsi_dev_probe-" 4984 "Failed in beiscsi_init_port\n"); 4985 goto free_port; 4986 } 4987 4988 for (i = 0; i < MAX_MCC_CMD ; i++) { 4989 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 4990 phba->ctrl.mcc_tag[i] = i + 1; 4991 phba->ctrl.mcc_numtag[i + 1] = 0; 4992 phba->ctrl.mcc_tag_available++; 4993 } 4994 4995 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 4996 4997 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", 4998 phba->shost->host_no); 4999 phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1); 5000 if (!phba->wq) { 5001 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5002 "BM_%d : beiscsi_dev_probe-" 5003 "Failed to allocate work queue\n"); 5004 goto free_twq; 5005 } 5006 5007 INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task, 5008 beiscsi_hw_health_check); 5009 5010 phwi_ctrlr = phba->phwi_ctrlr; 5011 phwi_context = phwi_ctrlr->phwi_ctxt; 5012 5013 if (blk_iopoll_enabled) { 5014 for (i = 0; i < phba->num_cpus; i++) { 5015 pbe_eq = &phwi_context->be_eq[i]; 5016 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, 5017 be_iopoll); 5018 blk_iopoll_enable(&pbe_eq->iopoll); 5019 } 5020 5021 i = (phba->msix_enabled) ? i : 0; 5022 /* Work item for MCC handling */ 5023 pbe_eq = &phwi_context->be_eq[i]; 5024 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); 5025 } else { 5026 if (phba->msix_enabled) { 5027 for (i = 0; i <= phba->num_cpus; i++) { 5028 pbe_eq = &phwi_context->be_eq[i]; 5029 INIT_WORK(&pbe_eq->work_cqs, 5030 beiscsi_process_all_cqs); 5031 } 5032 } else { 5033 pbe_eq = &phwi_context->be_eq[0]; 5034 INIT_WORK(&pbe_eq->work_cqs, 5035 beiscsi_process_all_cqs); 5036 } 5037 } 5038 5039 ret = beiscsi_init_irqs(phba); 5040 if (ret < 0) { 5041 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5042 "BM_%d : beiscsi_dev_probe-" 5043 "Failed to beiscsi_init_irqs\n"); 5044 goto free_blkenbld; 5045 } 5046 hwi_enable_intr(phba); 5047 5048 if (beiscsi_setup_boot_info(phba)) 5049 /* 5050 * log error but continue, because we may not be using 5051 * iscsi boot. 5052 */ 5053 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5054 "BM_%d : Could not set up " 5055 "iSCSI boot info.\n"); 5056 5057 beiscsi_create_def_ifaces(phba); 5058 schedule_delayed_work(&phba->beiscsi_hw_check_task, 5059 msecs_to_jiffies(1000)); 5060 5061 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5062 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); 5063 return 0; 5064 5065 free_blkenbld: 5066 destroy_workqueue(phba->wq); 5067 if (blk_iopoll_enabled) 5068 for (i = 0; i < phba->num_cpus; i++) { 5069 pbe_eq = &phwi_context->be_eq[i]; 5070 blk_iopoll_disable(&pbe_eq->iopoll); 5071 } 5072 free_twq: 5073 beiscsi_clean_port(phba); 5074 beiscsi_free_mem(phba); 5075 free_port: 5076 pci_free_consistent(phba->pcidev, 5077 phba->ctrl.mbox_mem_alloced.size, 5078 phba->ctrl.mbox_mem_alloced.va, 5079 phba->ctrl.mbox_mem_alloced.dma); 5080 beiscsi_unmap_pci_function(phba); 5081 hba_free: 5082 if (phba->msix_enabled) 5083 pci_disable_msix(phba->pcidev); 5084 iscsi_host_remove(phba->shost); 5085 pci_dev_put(phba->pcidev); 5086 iscsi_host_free(phba->shost); 5087 disable_pci: 5088 pci_disable_device(pcidev); 5089 return ret; 5090 } 5091 5092 struct iscsi_transport beiscsi_iscsi_transport = { 5093 .owner = THIS_MODULE, 5094 .name = DRV_NAME, 5095 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | 5096 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, 5097 .create_session = beiscsi_session_create, 5098 .destroy_session = beiscsi_session_destroy, 5099 .create_conn = beiscsi_conn_create, 5100 .bind_conn = beiscsi_conn_bind, 5101 .destroy_conn = iscsi_conn_teardown, 5102 .attr_is_visible = be2iscsi_attr_is_visible, 5103 .set_iface_param = be2iscsi_iface_set_param, 5104 .get_iface_param = be2iscsi_iface_get_param, 5105 .set_param = beiscsi_set_param, 5106 .get_conn_param = iscsi_conn_get_param, 5107 .get_session_param = iscsi_session_get_param, 5108 .get_host_param = beiscsi_get_host_param, 5109 .start_conn = beiscsi_conn_start, 5110 .stop_conn = iscsi_conn_stop, 5111 .send_pdu = iscsi_conn_send_pdu, 5112 .xmit_task = beiscsi_task_xmit, 5113 .cleanup_task = beiscsi_cleanup_task, 5114 .alloc_pdu = beiscsi_alloc_pdu, 5115 .parse_pdu_itt = beiscsi_parse_pdu, 5116 .get_stats = beiscsi_conn_get_stats, 5117 .get_ep_param = beiscsi_ep_get_param, 5118 .ep_connect = beiscsi_ep_connect, 5119 .ep_poll = beiscsi_ep_poll, 5120 .ep_disconnect = beiscsi_ep_disconnect, 5121 .session_recovery_timedout = iscsi_session_recovery_timedout, 5122 .bsg_request = beiscsi_bsg_request, 5123 }; 5124 5125 static struct pci_driver beiscsi_pci_driver = { 5126 .name = DRV_NAME, 5127 .probe = beiscsi_dev_probe, 5128 .remove = beiscsi_remove, 5129 .shutdown = beiscsi_shutdown, 5130 .id_table = beiscsi_pci_id_table 5131 }; 5132 5133 5134 static int __init beiscsi_module_init(void) 5135 { 5136 int ret; 5137 5138 beiscsi_scsi_transport = 5139 iscsi_register_transport(&beiscsi_iscsi_transport); 5140 if (!beiscsi_scsi_transport) { 5141 printk(KERN_ERR 5142 "beiscsi_module_init - Unable to register beiscsi transport.\n"); 5143 return -ENOMEM; 5144 } 5145 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n", 5146 &beiscsi_iscsi_transport); 5147 5148 ret = pci_register_driver(&beiscsi_pci_driver); 5149 if (ret) { 5150 printk(KERN_ERR 5151 "beiscsi_module_init - Unable to register beiscsi pci driver.\n"); 5152 goto unregister_iscsi_transport; 5153 } 5154 return 0; 5155 5156 unregister_iscsi_transport: 5157 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5158 return ret; 5159 } 5160 5161 static void __exit beiscsi_module_exit(void) 5162 { 5163 pci_unregister_driver(&beiscsi_pci_driver); 5164 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5165 } 5166 5167 module_init(beiscsi_module_init); 5168 module_exit(beiscsi_module_exit); 5169