/**
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. "
		"Range is 16 - 128");

#define beiscsi_disp_param(_name)\
ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	uint32_t param_val = 0;	\
	param_val = phba->attr_##_name;\
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			struct device_attribute *attr, const char *buf,\
			size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)

/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events	: 0x01\n"
		"\t\t\t\tMailbox Events		: 0x02\n"
		"\t\t\t\tMiscellaneous Events	: 0x04\n"
		"\t\t\t\tError Handling		: 0x08\n"
		"\t\t\t\tIO Path Events		: 0x10\n"
		"\t\t\t\tConfiguration Path	: 0x20\n"
		"\t\t\t\tiSCSI Protocol		: 0x40\n");

DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
	    beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
	    beiscsi_free_session_disp, NULL);
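
/* sysfs attributes exported for every beiscsi Scsi_Host */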
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_session_count,
	&dev_attr_beiscsi_free_session_count,
	&dev_attr_beiscsi_phys_port,
	NULL,
};

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;
	int rc;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->frwd_lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->frwd_lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->frwd_lock);
	/* Invalidate WRB Posted for this Task */
	AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
		      aborted_io_task->pwrb_handle->pwrb,
		      1);

	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
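	/* post the invalidate command and wait for its MCC completion */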
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be "
			    "submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	}

	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
	if (rc != -EBUSY)
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

	return iscsi_eh_abort(sc);
}

static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;
	int rc;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}
	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		if (sc->device->lun != abrt_task->sc->device->lun)
			continue;

		/* Invalidate WRB Posted for this Task */
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb,
			      1);

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->frwd_lock);
	inv_tbl = phba->inv_tbl;

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
			    " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	}

	rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd);
	if (rc != -EBUSY)
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
}
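
/*
 * iscsi_boot_sysfs callbacks: show the boot target, initiator and NIC
 * parameters stored in the adapter's boot session.
 */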
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			     (int)strlen(boot_sess->target_name),
			     (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		if (boot_conn->dest_ipaddr.ip_type == 0x1)
			rc = sprintf(buf, "%pI4\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		else
			rc = sprintf(str, "%pI6\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;

	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(str, phba);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}


static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}


static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);


static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
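
/**
 * beiscsi_enable_pci()- Enable the PCI function and set DMA masks
 * @pcidev: PCI device to be enabled
 *
 * Enables the device, sets bus mastering, and tries a 64-bit DMA mask
 * first, falling back to 32-bit streaming and coherent masks on failure.
 */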
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	pci_set_master(pcidev);
	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
	if (ret) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		} else {
			ret = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(32));
		}
	} else {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}

static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}

/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr to device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned ICD per page posting */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						   ~(align_mask));
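				/* round icd_start up to the next ICD-per-page boundary */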
				phba->fw_config.iscsi_icd_start[ulp_num] =
					icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICD discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
					icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					"BM_%d : Aligned ICD values\n"
					"\t ICD Start : %d\n"
					"\t ICD Count : %d\n"
					"\t ICD Discarded : %d\n",
					phba->fw_config.iscsi_icd_start[ulp_num],
					phba->fw_config.iscsi_icd_count[ulp_num],
					icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				    (total_cid_count +
				     BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.asyncpdus_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}

static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		 DB_EQ_RING_ID_HIGH_MASK)
		<< DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	num_eq_processed = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_mcc_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (pbe_eq->todo_mcc_cq)
		queue_work(phba->wq, &pbe_eq->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
			blk_iopoll_sched(&pbe_eq->iopoll);

		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}

	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

	return IRQ_HANDLED;
}

/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_mcc_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			num_mcceq_processed++;
		} else {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);
			num_ioeq_processed++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (num_ioeq_processed || num_mcceq_processed) {
		if (pbe_eq->todo_mcc_cq)
			queue_work(phba->wq, &pbe_eq->work_cqs);

		if ((num_mcceq_processed) && (!num_ioeq_processed))
			hwi_ring_eq_db(phba, eq->id, 0,
				       (num_ioeq_processed +
					num_mcceq_processed), 1, 1);
		else
			hwi_ring_eq_db(phba, eq->id, 0,
				       (num_ioeq_processed +
					num_mcceq_processed), 0, 1);

		return IRQ_HANDLED;
	} else
		return IRQ_NONE;
}
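
/**
 * beiscsi_init_irqs()- Register the interrupt handlers
 * @phba: ptr to device priv structure
 *
 * With MSI-X enabled, one vector is requested per EQ for be_isr_msix()
 * plus one for the MCC queue (be_isr_mcc()); otherwise a single shared
 * INTx handler, be_isr(), is registered.
 */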
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
						    GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
	}
	return ret;
}

void hwi_ring_cq_db(struct beiscsi_hba *phba,
		    unsigned int id, unsigned int num_processed,
		    unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		 DB_CQ_RING_ID_HIGH_MASK)
		<< DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
		PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unrecognized opcode 0x%x in async msg\n",
			    (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
			     & PDUBASE_OPCODE_MASK));
		return 1;
	}

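	/* hand the PDU to libiscsi under the session back_lock */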
	spin_lock_bh(&session->back_lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->back_lock);
	return 0;
}

static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, "
			    "value there=%p\n", phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base
			    [phba->io_sgl_free_index]);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		if (pwrb_context->alloc_index ==
				(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;
		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
						pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle, "
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, "
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}

static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task,
	       struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	io_task->scsi_cmnd = NULL;
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task,
		   struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	uint16_t wrb_index, cid, cri_index;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
	iscsi_put_task(task);
}
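
/*
 * be_complete_nopin_resp - rebuild the NOP-In header from the CQE
 * fields and complete it through libiscsi, as for logout/TMF above.
 */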
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task,
		       struct common_sol_cqe *csol_cqe)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = csol_cqe->i_flags;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
				struct sol_cqe *psol,
				struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}


static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	uint16_t cri_index = 0;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[
		      csol_cqe.wrb_index];

	task = pwrb_handle->pio_handle;
	pwrb = pwrb_handle->pwrb;
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	spin_lock_bh(&session->back_lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " hwi_complete_cmd- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}

static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
					  *pasync_ctx, unsigned int is_header,
					  unsigned int host_write_ptr)
{
	if (is_header)
		return &pasync_ctx->async_entry[host_write_ptr].
		header_busy_list;
	else
		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}

static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned char is_header = 0;
	unsigned int index, dpl;

	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      index, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      index, pdpdu_cqe);
	}

	phys_addr.u.a32.address_lo =
		(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					db_addr_lo) / 32] - dpl);
	phys_addr.u.a32.address_hi =
		pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
				       db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx,
						     is_header, index);
		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx,
						     is_header, index);
		break;
	default:
		pbusy_list = NULL;
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : Unexpected code=%d\n",
			    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					  code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
			break;
	}

	WARN_ON(!pasync_handle);

	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = dpl;
	*pcq_index = index;

	return pasync_handle;
}
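
/*
 * hwi_update_async_writables - advance the endpoint read pointer up to
 * the CQ index reported by hardware, marking traversed busy entries as
 * consumed and crediting the ring with that many writable slots.
 */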
static unsigned int
hwi_update_async_writables(struct beiscsi_hba *phba,
			   struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;

	num_entries = pasync_ctx->num_entries;
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
	}

	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : Duplicate notification received - index 0x%x!!\n",
			    cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}

static void hwi_free_async_msg(struct beiscsi_hba *phba,
			       struct hwi_async_pdu_context *pasync_ctx,
			       unsigned int cri)
{
	struct async_pdu_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;
	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
		list_del(&pasync_handle->link);

		if (pasync_handle->is_header) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_header.free_list);
			pasync_ctx->async_header.free_entries++;
		} else {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_data.free_list);
			pasync_ctx->async_data.free_entries++;
		}
	}

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
}

static struct phys_addr *
hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
		     unsigned int is_header, unsigned int host_write_ptr)
{
	struct phys_addr *pasync_sge = NULL;

	if (is_header)
		pasync_sge = pasync_ctx->async_header.ring_base;
	else
		pasync_sge = pasync_ctx->async_data.ring_base;

	return pasync_sge + host_write_ptr;
}

static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header, uint8_t ulp_num)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num, doorbell_offset;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
	num_entries = pasync_ctx->num_entries;

	if (is_header) {
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
				  doorbell_offset;
	} else {
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
				  doorbell_offset;
	}

	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
				       link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
					host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + doorbell_offset);
	}
}

static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(
			     beiscsi_conn->beiscsi_conn_cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
		     cri_index));

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(phba, pasync_ctx,
					   pasync_handle->is_header, cq_index);

	hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header,
			       BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
			       cri_index));
}
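
/*
 * hwi_fwd_async_msg - gather the queued header and data buffers for this
 * CRI into a single PDU and deliver it via beiscsi_process_async_pdu().
 */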
	void *phdr = NULL;
	unsigned int hdr_len = 0, buf_len = 0;
	unsigned int status, index = 0, offset = 0;
	void *pfirst_buffer = NULL;
	unsigned int num_buf = 0;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry(pasync_handle, plist, link) {
		if (index == 0) {
			/* first entry on the wait queue is the header PDU */
			phdr = pasync_handle->pbuffer;
			hdr_len = pasync_handle->buffer_len;
		} else {
			buf_len = pasync_handle->buffer_len;
			if (!num_buf) {
				/* first data buffer is reused in place */
				pfirst_buffer = pasync_handle->pbuffer;
				num_buf++;
			} else {
				/* append subsequent fragments to it */
				memcpy(pfirst_buffer + offset,
				       pasync_handle->pbuffer, buf_len);
			}
			offset += buf_len;
		}
		index++;
	}

	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
					   phdr, hdr_len, pfirst_buffer,
					   offset);

	hwi_free_async_msg(phba, pasync_ctx, cri);
	return status;
}

static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
		     BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
		     BE_GET_CRI_FROM_CID(beiscsi_conn->
		     beiscsi_conn_cid)));

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			/* a second header for the same CRI is fatal */
			hwi_free_async_msg(phba, pasync_ctx, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
			(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
				bytes_needed;

			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
1983 bytes_needed) 1984 status = hwi_fwd_async_msg(beiscsi_conn, phba, 1985 pasync_ctx, cri); 1986 } 1987 } 1988 return status; 1989 } 1990 1991 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, 1992 struct beiscsi_hba *phba, 1993 struct i_t_dpdu_cqe *pdpdu_cqe) 1994 { 1995 struct hwi_controller *phwi_ctrlr; 1996 struct hwi_async_pdu_context *pasync_ctx; 1997 struct async_pdu_handle *pasync_handle = NULL; 1998 unsigned int cq_index = -1; 1999 uint16_t cri_index = BE_GET_CRI_FROM_CID( 2000 beiscsi_conn->beiscsi_conn_cid); 2001 2002 phwi_ctrlr = phba->phwi_ctrlr; 2003 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 2004 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 2005 cri_index)); 2006 2007 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 2008 pdpdu_cqe, &cq_index); 2009 2010 if (pasync_handle->consumed == 0) 2011 hwi_update_async_writables(phba, pasync_ctx, 2012 pasync_handle->is_header, cq_index); 2013 2014 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); 2015 hwi_post_async_buffers(phba, pasync_handle->is_header, 2016 BEISCSI_GET_ULP_FROM_CRI( 2017 phwi_ctrlr, cri_index)); 2018 } 2019 2020 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) 2021 { 2022 struct be_queue_info *mcc_cq; 2023 struct be_mcc_compl *mcc_compl; 2024 unsigned int num_processed = 0; 2025 2026 mcc_cq = &phba->ctrl.mcc_obj.cq; 2027 mcc_compl = queue_tail_node(mcc_cq); 2028 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 2029 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { 2030 2031 if (num_processed >= 32) { 2032 hwi_ring_cq_db(phba, mcc_cq->id, 2033 num_processed, 0, 0); 2034 num_processed = 0; 2035 } 2036 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { 2037 /* Interpret flags as an async trailer */ 2038 if (is_link_state_evt(mcc_compl->flags)) 2039 /* Interpret compl as a async link evt */ 2040 beiscsi_async_link_state_process(phba, 2041 (struct be_async_event_link_state *) mcc_compl); 2042 else 2043 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX, 2044 "BM_%d : Unsupported Async Event, flags" 2045 " = 0x%08x\n", 2046 mcc_compl->flags); 2047 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 2048 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); 2049 atomic_dec(&phba->ctrl.mcc_obj.q.used); 2050 } 2051 2052 mcc_compl->flags = 0; 2053 queue_tail_inc(mcc_cq); 2054 mcc_compl = queue_tail_node(mcc_cq); 2055 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 2056 num_processed++; 2057 } 2058 2059 if (num_processed > 0) 2060 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0); 2061 2062 } 2063 2064 /** 2065 * beiscsi_process_cq()- Process the Completion Queue 2066 * @pbe_eq: Event Q on which the Completion has come 2067 * 2068 * return 2069 * Number of Completion Entries processed. 
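 *
 * The CQ doorbell is rung without re-arm after every 32 entries so the
 * queue keeps draining, and rung once more with re-arm after the last
 * valid entry has been processed.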
2070 **/ 2071 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 2072 { 2073 struct be_queue_info *cq; 2074 struct sol_cqe *sol; 2075 struct dmsg_cqe *dmsg; 2076 unsigned int num_processed = 0; 2077 unsigned int tot_nump = 0; 2078 unsigned short code = 0, cid = 0; 2079 uint16_t cri_index = 0; 2080 struct beiscsi_conn *beiscsi_conn; 2081 struct beiscsi_endpoint *beiscsi_ep; 2082 struct iscsi_endpoint *ep; 2083 struct beiscsi_hba *phba; 2084 2085 cq = pbe_eq->cq; 2086 sol = queue_tail_node(cq); 2087 phba = pbe_eq->phba; 2088 2089 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 2090 CQE_VALID_MASK) { 2091 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 2092 2093 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 2094 32] & CQE_CODE_MASK); 2095 2096 /* Get the CID */ 2097 if (is_chip_be2_be3r(phba)) { 2098 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); 2099 } else { 2100 if ((code == DRIVERMSG_NOTIFY) || 2101 (code == UNSOL_HDR_NOTIFY) || 2102 (code == UNSOL_DATA_NOTIFY)) 2103 cid = AMAP_GET_BITS( 2104 struct amap_i_t_dpdu_cqe_v2, 2105 cid, sol); 2106 else 2107 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 2108 cid, sol); 2109 } 2110 2111 cri_index = BE_GET_CRI_FROM_CID(cid); 2112 ep = phba->ep_array[cri_index]; 2113 2114 if (ep == NULL) { 2115 /* connection has already been freed 2116 * just move on to next one 2117 */ 2118 beiscsi_log(phba, KERN_WARNING, 2119 BEISCSI_LOG_INIT, 2120 "BM_%d : proc cqe of disconn ep: cid %d\n", 2121 cid); 2122 goto proc_next_cqe; 2123 } 2124 2125 beiscsi_ep = ep->dd_data; 2126 beiscsi_conn = beiscsi_ep->conn; 2127 2128 if (num_processed >= 32) { 2129 hwi_ring_cq_db(phba, cq->id, 2130 num_processed, 0, 0); 2131 tot_nump += num_processed; 2132 num_processed = 0; 2133 } 2134 2135 switch (code) { 2136 case SOL_CMD_COMPLETE: 2137 hwi_complete_cmd(beiscsi_conn, phba, sol); 2138 break; 2139 case DRIVERMSG_NOTIFY: 2140 beiscsi_log(phba, KERN_INFO, 2141 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2142 "BM_%d : Received %s[%d] on CID : %d\n", 2143 cqe_desc[code], code, cid); 2144 2145 dmsg = (struct dmsg_cqe *)sol; 2146 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 2147 break; 2148 case UNSOL_HDR_NOTIFY: 2149 beiscsi_log(phba, KERN_INFO, 2150 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2151 "BM_%d : Received %s[%d] on CID : %d\n", 2152 cqe_desc[code], code, cid); 2153 2154 spin_lock_bh(&phba->async_pdu_lock); 2155 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2156 (struct i_t_dpdu_cqe *)sol); 2157 spin_unlock_bh(&phba->async_pdu_lock); 2158 break; 2159 case UNSOL_DATA_NOTIFY: 2160 beiscsi_log(phba, KERN_INFO, 2161 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2162 "BM_%d : Received %s[%d] on CID : %d\n", 2163 cqe_desc[code], code, cid); 2164 2165 spin_lock_bh(&phba->async_pdu_lock); 2166 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2167 (struct i_t_dpdu_cqe *)sol); 2168 spin_unlock_bh(&phba->async_pdu_lock); 2169 break; 2170 case CXN_INVALIDATE_INDEX_NOTIFY: 2171 case CMD_INVALIDATED_NOTIFY: 2172 case CXN_INVALIDATE_NOTIFY: 2173 beiscsi_log(phba, KERN_ERR, 2174 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2175 "BM_%d : Ignoring %s[%d] on CID : %d\n", 2176 cqe_desc[code], code, cid); 2177 break; 2178 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 2179 case CMD_KILLED_INVALID_STATSN_RCVD: 2180 case CMD_KILLED_INVALID_R2T_RCVD: 2181 case CMD_CXN_KILLED_LUN_INVALID: 2182 case CMD_CXN_KILLED_ICD_INVALID: 2183 case CMD_CXN_KILLED_ITT_INVALID: 2184 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 2185 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 2186 beiscsi_log(phba, KERN_ERR, 2187 BEISCSI_LOG_CONFIG 
| BEISCSI_LOG_IO, 2188 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 2189 cqe_desc[code], code, cid); 2190 break; 2191 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 2192 beiscsi_log(phba, KERN_ERR, 2193 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2194 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 2195 cqe_desc[code], code, cid); 2196 spin_lock_bh(&phba->async_pdu_lock); 2197 hwi_flush_default_pdu_buffer(phba, beiscsi_conn, 2198 (struct i_t_dpdu_cqe *) sol); 2199 spin_unlock_bh(&phba->async_pdu_lock); 2200 break; 2201 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2202 case CXN_KILLED_BURST_LEN_MISMATCH: 2203 case CXN_KILLED_AHS_RCVD: 2204 case CXN_KILLED_HDR_DIGEST_ERR: 2205 case CXN_KILLED_UNKNOWN_HDR: 2206 case CXN_KILLED_STALE_ITT_TTT_RCVD: 2207 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 2208 case CXN_KILLED_TIMED_OUT: 2209 case CXN_KILLED_FIN_RCVD: 2210 case CXN_KILLED_RST_SENT: 2211 case CXN_KILLED_RST_RCVD: 2212 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2213 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2214 case CXN_KILLED_OVER_RUN_RESIDUAL: 2215 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2216 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2217 beiscsi_log(phba, KERN_ERR, 2218 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2219 "BM_%d : Event %s[%d] received on CID : %d\n", 2220 cqe_desc[code], code, cid); 2221 if (beiscsi_conn) 2222 iscsi_conn_failure(beiscsi_conn->conn, 2223 ISCSI_ERR_CONN_FAILED); 2224 break; 2225 default: 2226 beiscsi_log(phba, KERN_ERR, 2227 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2228 "BM_%d : Invalid CQE Event Received Code : %d" 2229 "CID 0x%x...\n", 2230 code, cid); 2231 break; 2232 } 2233 2234 proc_next_cqe: 2235 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2236 queue_tail_inc(cq); 2237 sol = queue_tail_node(cq); 2238 num_processed++; 2239 } 2240 2241 if (num_processed > 0) { 2242 tot_nump += num_processed; 2243 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0); 2244 } 2245 return tot_nump; 2246 } 2247 2248 void beiscsi_process_all_cqs(struct work_struct *work) 2249 { 2250 unsigned long flags; 2251 struct hwi_controller *phwi_ctrlr; 2252 struct hwi_context_memory *phwi_context; 2253 struct beiscsi_hba *phba; 2254 struct be_eq_obj *pbe_eq = 2255 container_of(work, struct be_eq_obj, work_cqs); 2256 2257 phba = pbe_eq->phba; 2258 phwi_ctrlr = phba->phwi_ctrlr; 2259 phwi_context = phwi_ctrlr->phwi_ctxt; 2260 2261 if (pbe_eq->todo_mcc_cq) { 2262 spin_lock_irqsave(&phba->isr_lock, flags); 2263 pbe_eq->todo_mcc_cq = false; 2264 spin_unlock_irqrestore(&phba->isr_lock, flags); 2265 beiscsi_process_mcc_isr(phba); 2266 } 2267 2268 if (pbe_eq->todo_cq) { 2269 spin_lock_irqsave(&phba->isr_lock, flags); 2270 pbe_eq->todo_cq = false; 2271 spin_unlock_irqrestore(&phba->isr_lock, flags); 2272 beiscsi_process_cq(pbe_eq); 2273 } 2274 2275 /* rearm EQ for further interrupts */ 2276 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2277 } 2278 2279 static int be_iopoll(struct blk_iopoll *iop, int budget) 2280 { 2281 unsigned int ret; 2282 struct beiscsi_hba *phba; 2283 struct be_eq_obj *pbe_eq; 2284 2285 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2286 ret = beiscsi_process_cq(pbe_eq); 2287 pbe_eq->cq_count += ret; 2288 if (ret < budget) { 2289 phba = pbe_eq->phba; 2290 blk_iopoll_complete(iop); 2291 beiscsi_log(phba, KERN_INFO, 2292 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2293 "BM_%d : rearm pbe_eq->q.id =%d\n", 2294 pbe_eq->q.id); 2295 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2296 } 2297 return ret; 2298 } 2299 2300 static void 2301 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2302 unsigned 
int num_sg, struct beiscsi_io_task *io_task) 2303 { 2304 struct iscsi_sge *psgl; 2305 unsigned int sg_len, index; 2306 unsigned int sge_len = 0; 2307 unsigned long long addr; 2308 struct scatterlist *l_sg; 2309 unsigned int offset; 2310 2311 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2312 io_task->bhs_pa.u.a32.address_lo); 2313 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2314 io_task->bhs_pa.u.a32.address_hi); 2315 2316 l_sg = sg; 2317 for (index = 0; (index < num_sg) && (index < 2); index++, 2318 sg = sg_next(sg)) { 2319 if (index == 0) { 2320 sg_len = sg_dma_len(sg); 2321 addr = (u64) sg_dma_address(sg); 2322 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2323 sge0_addr_lo, pwrb, 2324 lower_32_bits(addr)); 2325 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2326 sge0_addr_hi, pwrb, 2327 upper_32_bits(addr)); 2328 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2329 sge0_len, pwrb, 2330 sg_len); 2331 sge_len = sg_len; 2332 } else { 2333 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2334 pwrb, sge_len); 2335 sg_len = sg_dma_len(sg); 2336 addr = (u64) sg_dma_address(sg); 2337 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2338 sge1_addr_lo, pwrb, 2339 lower_32_bits(addr)); 2340 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2341 sge1_addr_hi, pwrb, 2342 upper_32_bits(addr)); 2343 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2344 sge1_len, pwrb, 2345 sg_len); 2346 } 2347 } 2348 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2349 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2350 2351 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2352 2353 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2354 io_task->bhs_pa.u.a32.address_hi); 2355 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2356 io_task->bhs_pa.u.a32.address_lo); 2357 2358 if (num_sg == 1) { 2359 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2360 1); 2361 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2362 0); 2363 } else if (num_sg == 2) { 2364 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2365 0); 2366 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2367 1); 2368 } else { 2369 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2370 0); 2371 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2372 0); 2373 } 2374 2375 sg = l_sg; 2376 psgl++; 2377 psgl++; 2378 offset = 0; 2379 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2380 sg_len = sg_dma_len(sg); 2381 addr = (u64) sg_dma_address(sg); 2382 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2383 lower_32_bits(addr)); 2384 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2385 upper_32_bits(addr)); 2386 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2387 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2388 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2389 offset += sg_len; 2390 } 2391 psgl--; 2392 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2393 } 2394 2395 static void 2396 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2397 unsigned int num_sg, struct beiscsi_io_task *io_task) 2398 { 2399 struct iscsi_sge *psgl; 2400 unsigned int sg_len, index; 2401 unsigned int sge_len = 0; 2402 unsigned long long addr; 2403 struct scatterlist *l_sg; 2404 unsigned int offset; 2405 2406 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2407 io_task->bhs_pa.u.a32.address_lo); 2408 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2409 io_task->bhs_pa.u.a32.address_hi); 2410 2411 l_sg = 
sg; 2412 for (index = 0; (index < num_sg) && (index < 2); index++, 2413 sg = sg_next(sg)) { 2414 if (index == 0) { 2415 sg_len = sg_dma_len(sg); 2416 addr = (u64) sg_dma_address(sg); 2417 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2418 ((u32)(addr & 0xFFFFFFFF))); 2419 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2420 ((u32)(addr >> 32))); 2421 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2422 sg_len); 2423 sge_len = sg_len; 2424 } else { 2425 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2426 pwrb, sge_len); 2427 sg_len = sg_dma_len(sg); 2428 addr = (u64) sg_dma_address(sg); 2429 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2430 ((u32)(addr & 0xFFFFFFFF))); 2431 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2432 ((u32)(addr >> 32))); 2433 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2434 sg_len); 2435 } 2436 } 2437 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2438 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2439 2440 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2441 2442 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2443 io_task->bhs_pa.u.a32.address_hi); 2444 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2445 io_task->bhs_pa.u.a32.address_lo); 2446 2447 if (num_sg == 1) { 2448 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2449 1); 2450 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2451 0); 2452 } else if (num_sg == 2) { 2453 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2454 0); 2455 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2456 1); 2457 } else { 2458 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2459 0); 2460 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2461 0); 2462 } 2463 sg = l_sg; 2464 psgl++; 2465 psgl++; 2466 offset = 0; 2467 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2468 sg_len = sg_dma_len(sg); 2469 addr = (u64) sg_dma_address(sg); 2470 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2471 (addr & 0xFFFFFFFF)); 2472 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2473 (addr >> 32)); 2474 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2475 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2476 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2477 offset += sg_len; 2478 } 2479 psgl--; 2480 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2481 } 2482 2483 /** 2484 * hwi_write_buffer()- Populate the WRB with task info 2485 * @pwrb: ptr to the WRB entry 2486 * @task: iscsi task which is to be executed 2487 **/ 2488 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2489 { 2490 struct iscsi_sge *psgl; 2491 struct beiscsi_io_task *io_task = task->dd_data; 2492 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2493 struct beiscsi_hba *phba = beiscsi_conn->phba; 2494 uint8_t dsp_value = 0; 2495 2496 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2497 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2498 io_task->bhs_pa.u.a32.address_lo); 2499 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2500 io_task->bhs_pa.u.a32.address_hi); 2501 2502 if (task->data) { 2503 2504 /* Check for the data_count */ 2505 dsp_value = (task->data_count) ? 
1 : 0; 2506 2507 if (is_chip_be2_be3r(phba)) 2508 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2509 pwrb, dsp_value); 2510 else 2511 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2512 pwrb, dsp_value); 2513 2514 /* Map addr only if there is data_count */ 2515 if (dsp_value) { 2516 io_task->mtask_addr = pci_map_single(phba->pcidev, 2517 task->data, 2518 task->data_count, 2519 PCI_DMA_TODEVICE); 2520 io_task->mtask_data_count = task->data_count; 2521 } else 2522 io_task->mtask_addr = 0; 2523 2524 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2525 lower_32_bits(io_task->mtask_addr)); 2526 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2527 upper_32_bits(io_task->mtask_addr)); 2528 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2529 task->data_count); 2530 2531 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2532 } else { 2533 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2534 io_task->mtask_addr = 0; 2535 } 2536 2537 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2538 2539 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2540 2541 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2542 io_task->bhs_pa.u.a32.address_hi); 2543 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2544 io_task->bhs_pa.u.a32.address_lo); 2545 if (task->data) { 2546 psgl++; 2547 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2548 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2549 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2550 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2551 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2552 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2553 2554 psgl++; 2555 if (task->data) { 2556 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2557 lower_32_bits(io_task->mtask_addr)); 2558 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2559 upper_32_bits(io_task->mtask_addr)); 2560 } 2561 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2562 } 2563 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2564 } 2565 2566 /** 2567 * beiscsi_find_mem_req()- Find mem needed 2568 * @phba: ptr to HBA struct 2569 **/ 2570 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2571 { 2572 uint8_t mem_descr_index, ulp_num; 2573 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2574 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2575 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2576 2577 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2578 sizeof(struct sol_cqe)); 2579 2580 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2581 2582 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2583 BE_ISCSI_PDU_HEADER_SIZE; 2584 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2585 sizeof(struct hwi_context_memory); 2586 2587 2588 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2589 * (phba->params.wrbs_per_cxn) 2590 * phba->params.cxns_per_ctrl; 2591 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2592 (phba->params.wrbs_per_cxn); 2593 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2594 phba->params.cxns_per_ctrl); 2595 2596 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2597 phba->params.icds_per_ctrl; 2598 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2599 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2600 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2601 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2602 2603 num_async_pdu_buf_sgl_pages = 2604 
PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2605 phba, ulp_num) * 2606 sizeof(struct phys_addr)); 2607 2608 num_async_pdu_buf_pages = 2609 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2610 phba, ulp_num) * 2611 phba->params.defpdu_hdr_sz); 2612 2613 num_async_pdu_data_pages = 2614 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2615 phba, ulp_num) * 2616 phba->params.defpdu_data_sz); 2617 2618 num_async_pdu_data_sgl_pages = 2619 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2620 phba, ulp_num) * 2621 sizeof(struct phys_addr)); 2622 2623 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2624 (ulp_num * MEM_DESCR_OFFSET)); 2625 phba->mem_req[mem_descr_index] = 2626 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2627 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2628 2629 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2630 (ulp_num * MEM_DESCR_OFFSET)); 2631 phba->mem_req[mem_descr_index] = 2632 num_async_pdu_buf_pages * 2633 PAGE_SIZE; 2634 2635 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2636 (ulp_num * MEM_DESCR_OFFSET)); 2637 phba->mem_req[mem_descr_index] = 2638 num_async_pdu_data_pages * 2639 PAGE_SIZE; 2640 2641 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2642 (ulp_num * MEM_DESCR_OFFSET)); 2643 phba->mem_req[mem_descr_index] = 2644 num_async_pdu_buf_sgl_pages * 2645 PAGE_SIZE; 2646 2647 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2648 (ulp_num * MEM_DESCR_OFFSET)); 2649 phba->mem_req[mem_descr_index] = 2650 num_async_pdu_data_sgl_pages * 2651 PAGE_SIZE; 2652 2653 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2654 (ulp_num * MEM_DESCR_OFFSET)); 2655 phba->mem_req[mem_descr_index] = 2656 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2657 sizeof(struct async_pdu_handle); 2658 2659 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2660 (ulp_num * MEM_DESCR_OFFSET)); 2661 phba->mem_req[mem_descr_index] = 2662 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2663 sizeof(struct async_pdu_handle); 2664 2665 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2666 (ulp_num * MEM_DESCR_OFFSET)); 2667 phba->mem_req[mem_descr_index] = 2668 sizeof(struct hwi_async_pdu_context) + 2669 (BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2670 sizeof(struct hwi_async_entry)); 2671 } 2672 } 2673 } 2674 2675 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2676 { 2677 dma_addr_t bus_add; 2678 struct hwi_controller *phwi_ctrlr; 2679 struct be_mem_descriptor *mem_descr; 2680 struct mem_array *mem_arr, *mem_arr_orig; 2681 unsigned int i, j, alloc_size, curr_alloc_size; 2682 2683 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2684 if (!phba->phwi_ctrlr) 2685 return -ENOMEM; 2686 2687 /* Allocate memory for wrb_context */ 2688 phwi_ctrlr = phba->phwi_ctrlr; 2689 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * 2690 phba->params.cxns_per_ctrl, 2691 GFP_KERNEL); 2692 if (!phwi_ctrlr->wrb_context) 2693 return -ENOMEM; 2694 2695 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2696 GFP_KERNEL); 2697 if (!phba->init_mem) { 2698 kfree(phwi_ctrlr->wrb_context); 2699 kfree(phba->phwi_ctrlr); 2700 return -ENOMEM; 2701 } 2702 2703 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT, 2704 GFP_KERNEL); 2705 if (!mem_arr_orig) { 2706 kfree(phba->init_mem); 2707 kfree(phwi_ctrlr->wrb_context); 2708 kfree(phba->phwi_ctrlr); 2709 return -ENOMEM; 2710 } 2711 2712 mem_descr = phba->init_mem; 2713 for (i = 0; i < SE_MEM_MAX; i++) { 2714 if (!phba->mem_req[i]) { 2715 mem_descr->mem_array = NULL; 2716 mem_descr++; 2717 continue; 2718 } 2719 2720 j = 0; 2721 mem_arr = mem_arr_orig; 2722 alloc_size = 
phba->mem_req[i]; 2723 memset(mem_arr, 0, sizeof(struct mem_array) * 2724 BEISCSI_MAX_FRAGS_INIT); 2725 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2726 do { 2727 mem_arr->virtual_address = pci_alloc_consistent( 2728 phba->pcidev, 2729 curr_alloc_size, 2730 &bus_add); 2731 if (!mem_arr->virtual_address) { 2732 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2733 goto free_mem; 2734 if (curr_alloc_size - 2735 rounddown_pow_of_two(curr_alloc_size)) 2736 curr_alloc_size = rounddown_pow_of_two 2737 (curr_alloc_size); 2738 else 2739 curr_alloc_size = curr_alloc_size / 2; 2740 } else { 2741 mem_arr->bus_address.u. 2742 a64.address = (__u64) bus_add; 2743 mem_arr->size = curr_alloc_size; 2744 alloc_size -= curr_alloc_size; 2745 curr_alloc_size = min(be_max_phys_size * 2746 1024, alloc_size); 2747 j++; 2748 mem_arr++; 2749 } 2750 } while (alloc_size); 2751 mem_descr->num_elements = j; 2752 mem_descr->size_in_bytes = phba->mem_req[i]; 2753 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j, 2754 GFP_KERNEL); 2755 if (!mem_descr->mem_array) 2756 goto free_mem; 2757 2758 memcpy(mem_descr->mem_array, mem_arr_orig, 2759 sizeof(struct mem_array) * j); 2760 mem_descr++; 2761 } 2762 kfree(mem_arr_orig); 2763 return 0; 2764 free_mem: 2765 mem_descr->num_elements = j; 2766 while ((i) || (j)) { 2767 for (j = mem_descr->num_elements; j > 0; j--) { 2768 pci_free_consistent(phba->pcidev, 2769 mem_descr->mem_array[j - 1].size, 2770 mem_descr->mem_array[j - 1]. 2771 virtual_address, 2772 (unsigned long)mem_descr-> 2773 mem_array[j - 1]. 2774 bus_address.u.a64.address); 2775 } 2776 if (i) { 2777 i--; 2778 kfree(mem_descr->mem_array); 2779 mem_descr--; 2780 } 2781 } 2782 kfree(mem_arr_orig); 2783 kfree(phba->init_mem); 2784 kfree(phba->phwi_ctrlr->wrb_context); 2785 kfree(phba->phwi_ctrlr); 2786 return -ENOMEM; 2787 } 2788 2789 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2790 { 2791 beiscsi_find_mem_req(phba); 2792 return beiscsi_alloc_mem(phba); 2793 } 2794 2795 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2796 { 2797 struct pdu_data_out *pdata_out; 2798 struct pdu_nop_out *pnop_out; 2799 struct be_mem_descriptor *mem_descr; 2800 2801 mem_descr = phba->init_mem; 2802 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2803 pdata_out = 2804 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2805 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2806 2807 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2808 IIOC_SCSI_DATA); 2809 2810 pnop_out = 2811 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 
2812 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2813 2814 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2815 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2816 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2817 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2818 } 2819 2820 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2821 { 2822 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2823 struct hwi_context_memory *phwi_ctxt; 2824 struct wrb_handle *pwrb_handle = NULL; 2825 struct hwi_controller *phwi_ctrlr; 2826 struct hwi_wrb_context *pwrb_context; 2827 struct iscsi_wrb *pwrb = NULL; 2828 unsigned int num_cxn_wrbh = 0; 2829 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2830 2831 mem_descr_wrbh = phba->init_mem; 2832 mem_descr_wrbh += HWI_MEM_WRBH; 2833 2834 mem_descr_wrb = phba->init_mem; 2835 mem_descr_wrb += HWI_MEM_WRB; 2836 phwi_ctrlr = phba->phwi_ctrlr; 2837 2838 /* Allocate memory for WRBQ */ 2839 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2840 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * 2841 phba->params.cxns_per_ctrl, 2842 GFP_KERNEL); 2843 if (!phwi_ctxt->be_wrbq) { 2844 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2845 "BM_%d : WRBQ Mem Alloc Failed\n"); 2846 return -ENOMEM; 2847 } 2848 2849 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2850 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2851 pwrb_context->pwrb_handle_base = 2852 kzalloc(sizeof(struct wrb_handle *) * 2853 phba->params.wrbs_per_cxn, GFP_KERNEL); 2854 if (!pwrb_context->pwrb_handle_base) { 2855 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2856 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2857 goto init_wrb_hndl_failed; 2858 } 2859 pwrb_context->pwrb_handle_basestd = 2860 kzalloc(sizeof(struct wrb_handle *) * 2861 phba->params.wrbs_per_cxn, GFP_KERNEL); 2862 if (!pwrb_context->pwrb_handle_basestd) { 2863 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2864 "BM_%d : Mem Alloc Failed. 
Failing to load\n");
			goto init_wrb_hndl_failed;
		}
		/* carve handles for this connection out of the current
		 * HWI_MEM_WRBH chunk; move to the next chunk when the
		 * current one is exhausted
		 */
		if (!num_cxn_wrbh) {
			pwrb_handle =
				mem_descr_wrbh->mem_array[idx].virtual_address;
			num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
					((sizeof(struct wrb_handle)) *
					 phba->params.wrbs_per_cxn));
			idx++;
		}
		pwrb_context->alloc_index = 0;
		pwrb_context->wrb_handles_available = 0;
		pwrb_context->free_index = 0;

		if (num_cxn_wrbh) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
					pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle->wrb_index = j;
				pwrb_handle++;
			}
			num_cxn_wrbh--;
		}
	}
	idx = 0;
	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		if (!num_cxn_wrb) {
			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
				((sizeof(struct iscsi_wrb) *
				  phba->params.wrbs_per_cxn));
			idx++;
		}

		if (num_cxn_wrb) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		}
	}
	return 0;
init_wrb_hndl_failed:
	/* free every context allocated so far, including wrb_context[0]
	 * and the partially initialized one at 'index'
	 */
	for (j = 0; j <= index; j++) {
		pwrb_context = &phwi_ctrlr->wrb_context[j];
		kfree(pwrb_context->pwrb_handle_base);
		kfree(pwrb_context->pwrb_handle_basestd);
	}
	return -ENOMEM;
}

static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	uint8_t ulp_num;
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index, idx, num_per_mem, num_async_data;
	struct be_mem_descriptor *mem_descr;

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {

			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
				      (ulp_num * MEM_DESCR_OFFSET));

			phwi_ctrlr = phba->phwi_ctrlr;
			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
				(struct hwi_async_pdu_context *)
				mem_descr->mem_array[0].virtual_address;

			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
			memset(pasync_ctx, 0, sizeof(*pasync_ctx));

			pasync_ctx->async_entry =
				(struct hwi_async_entry *)
				((long unsigned int)pasync_ctx +
				 sizeof(struct hwi_async_pdu_context));

			pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
							ulp_num);
			pasync_ctx->buffer_size = p->defpdu_hdr_sz;

			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
				(ulp_num * MEM_DESCR_OFFSET);
			if (mem_descr->mem_array[0].virtual_address) {
				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					    "BM_%d : hwi_init_async_pdu_ctx"
					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
					    ulp_num,
					    mem_descr->mem_array[0].
2964 virtual_address); 2965 } else 2966 beiscsi_log(phba, KERN_WARNING, 2967 BEISCSI_LOG_INIT, 2968 "BM_%d : No Virtual address for ULP : %d\n", 2969 ulp_num); 2970 2971 pasync_ctx->async_header.va_base = 2972 mem_descr->mem_array[0].virtual_address; 2973 2974 pasync_ctx->async_header.pa_base.u.a64.address = 2975 mem_descr->mem_array[0]. 2976 bus_address.u.a64.address; 2977 2978 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2979 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2980 (ulp_num * MEM_DESCR_OFFSET); 2981 if (mem_descr->mem_array[0].virtual_address) { 2982 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2983 "BM_%d : hwi_init_async_pdu_ctx" 2984 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 2985 ulp_num, 2986 mem_descr->mem_array[0]. 2987 virtual_address); 2988 } else 2989 beiscsi_log(phba, KERN_WARNING, 2990 BEISCSI_LOG_INIT, 2991 "BM_%d : No Virtual address for ULP : %d\n", 2992 ulp_num); 2993 2994 pasync_ctx->async_header.ring_base = 2995 mem_descr->mem_array[0].virtual_address; 2996 2997 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2998 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2999 (ulp_num * MEM_DESCR_OFFSET); 3000 if (mem_descr->mem_array[0].virtual_address) { 3001 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3002 "BM_%d : hwi_init_async_pdu_ctx" 3003 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 3004 ulp_num, 3005 mem_descr->mem_array[0]. 3006 virtual_address); 3007 } else 3008 beiscsi_log(phba, KERN_WARNING, 3009 BEISCSI_LOG_INIT, 3010 "BM_%d : No Virtual address for ULP : %d\n", 3011 ulp_num); 3012 3013 pasync_ctx->async_header.handle_base = 3014 mem_descr->mem_array[0].virtual_address; 3015 pasync_ctx->async_header.writables = 0; 3016 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); 3017 3018 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3019 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3020 (ulp_num * MEM_DESCR_OFFSET); 3021 if (mem_descr->mem_array[0].virtual_address) { 3022 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3023 "BM_%d : hwi_init_async_pdu_ctx" 3024 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 3025 ulp_num, 3026 mem_descr->mem_array[0]. 3027 virtual_address); 3028 } else 3029 beiscsi_log(phba, KERN_WARNING, 3030 BEISCSI_LOG_INIT, 3031 "BM_%d : No Virtual address for ULP : %d\n", 3032 ulp_num); 3033 3034 pasync_ctx->async_data.ring_base = 3035 mem_descr->mem_array[0].virtual_address; 3036 3037 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3038 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 3039 (ulp_num * MEM_DESCR_OFFSET); 3040 if (!mem_descr->mem_array[0].virtual_address) 3041 beiscsi_log(phba, KERN_WARNING, 3042 BEISCSI_LOG_INIT, 3043 "BM_%d : No Virtual address for ULP : %d\n", 3044 ulp_num); 3045 3046 pasync_ctx->async_data.handle_base = 3047 mem_descr->mem_array[0].virtual_address; 3048 pasync_ctx->async_data.writables = 0; 3049 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 3050 3051 pasync_header_h = 3052 (struct async_pdu_handle *) 3053 pasync_ctx->async_header.handle_base; 3054 pasync_data_h = 3055 (struct async_pdu_handle *) 3056 pasync_ctx->async_data.handle_base; 3057 3058 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3059 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 3060 (ulp_num * MEM_DESCR_OFFSET); 3061 if (mem_descr->mem_array[0].virtual_address) { 3062 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3063 "BM_%d : hwi_init_async_pdu_ctx" 3064 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 3065 ulp_num, 3066 mem_descr->mem_array[0]. 
3067 virtual_address); 3068 } else 3069 beiscsi_log(phba, KERN_WARNING, 3070 BEISCSI_LOG_INIT, 3071 "BM_%d : No Virtual address for ULP : %d\n", 3072 ulp_num); 3073 3074 idx = 0; 3075 pasync_ctx->async_data.va_base = 3076 mem_descr->mem_array[idx].virtual_address; 3077 pasync_ctx->async_data.pa_base.u.a64.address = 3078 mem_descr->mem_array[idx]. 3079 bus_address.u.a64.address; 3080 3081 num_async_data = ((mem_descr->mem_array[idx].size) / 3082 phba->params.defpdu_data_sz); 3083 num_per_mem = 0; 3084 3085 for (index = 0; index < BEISCSI_GET_CID_COUNT 3086 (phba, ulp_num); index++) { 3087 pasync_header_h->cri = -1; 3088 pasync_header_h->index = (char)index; 3089 INIT_LIST_HEAD(&pasync_header_h->link); 3090 pasync_header_h->pbuffer = 3091 (void *)((unsigned long) 3092 (pasync_ctx-> 3093 async_header.va_base) + 3094 (p->defpdu_hdr_sz * index)); 3095 3096 pasync_header_h->pa.u.a64.address = 3097 pasync_ctx->async_header.pa_base.u.a64. 3098 address + (p->defpdu_hdr_sz * index); 3099 3100 list_add_tail(&pasync_header_h->link, 3101 &pasync_ctx->async_header. 3102 free_list); 3103 pasync_header_h++; 3104 pasync_ctx->async_header.free_entries++; 3105 pasync_ctx->async_header.writables++; 3106 3107 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 3108 wait_queue.list); 3109 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 3110 header_busy_list); 3111 pasync_data_h->cri = -1; 3112 pasync_data_h->index = (char)index; 3113 INIT_LIST_HEAD(&pasync_data_h->link); 3114 3115 if (!num_async_data) { 3116 num_per_mem = 0; 3117 idx++; 3118 pasync_ctx->async_data.va_base = 3119 mem_descr->mem_array[idx]. 3120 virtual_address; 3121 pasync_ctx->async_data.pa_base.u. 3122 a64.address = 3123 mem_descr->mem_array[idx]. 3124 bus_address.u.a64.address; 3125 num_async_data = 3126 ((mem_descr->mem_array[idx]. 3127 size) / 3128 phba->params.defpdu_data_sz); 3129 } 3130 pasync_data_h->pbuffer = 3131 (void *)((unsigned long) 3132 (pasync_ctx->async_data.va_base) + 3133 (p->defpdu_data_sz * num_per_mem)); 3134 3135 pasync_data_h->pa.u.a64.address = 3136 pasync_ctx->async_data.pa_base.u.a64. 3137 address + (p->defpdu_data_sz * 3138 num_per_mem); 3139 num_per_mem++; 3140 num_async_data--; 3141 3142 list_add_tail(&pasync_data_h->link, 3143 &pasync_ctx->async_data. 3144 free_list); 3145 pasync_data_h++; 3146 pasync_ctx->async_data.free_entries++; 3147 pasync_ctx->async_data.writables++; 3148 3149 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 
				       data_busy_list);
			}

			pasync_ctx->async_header.host_write_ptr = 0;
			pasync_ctx->async_header.ep_read_ptr = -1;
			pasync_ctx->async_data.host_write_ptr = 0;
			pasync_ctx->async_data.ep_read_ptr = -1;
		}
	}

	return 0;
}

static int
be_sgl_create_contiguous(void *virtual_address,
			 u64 physical_address, u32 length,
			 struct be_dma_mem *sgl)
{
	WARN_ON(!virtual_address);
	WARN_ON(!physical_address);
	WARN_ON(length == 0);
	WARN_ON(!sgl);

	sgl->va = virtual_address;
	sgl->dma = (unsigned long)physical_address;
	sgl->size = length;

	return 0;
}

static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
{
	memset(sgl, 0, sizeof(*sgl));
}

static void
hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
		     struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous(pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}

static void
hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
			   struct mem_array *pmem, struct be_dma_mem *sgl)
{
	if (sgl->va)
		be_sgl_destroy_contiguous(sgl);

	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
				 pmem->bus_address.u.a64.address,
				 pmem->size, sgl);
}

static int be_fill_queue(struct be_queue_info *q,
			 u16 len, u16 entry_size, void *vaddress)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = vaddress;
	if (!mem->va)
		return -ENOMEM;
	memset(mem->va, 0, mem->size);
	return 0;
}

static int beiscsi_create_eqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	unsigned int i, num_eq_pages;
	int ret = 0, eq_for_mcc;
	struct be_queue_info *eq;
	struct be_dma_mem *mem;
	void *eq_vaddress;
	dma_addr_t paddr;

	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
				      sizeof(struct be_eq_entry));

	/* with MSI-X, one extra EQ is created for MCC completions */
	if (phba->msix_enabled)
		eq_for_mcc = 1;
	else
		eq_for_mcc = 0;
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		phwi_context->be_eq[i].phba = phba;
		eq_vaddress = pci_alloc_consistent(phba->pcidev,
						   num_eq_pages * PAGE_SIZE,
						   &paddr);
		if (!eq_vaddress)
			goto create_eq_error;

		mem->va = eq_vaddress;
		ret = be_fill_queue(eq, phba->params.num_eq_entries,
				    sizeof(struct be_eq_entry), eq_vaddress);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : be_fill_queue Failed for EQ\n");
			goto create_eq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
					    phwi_context->cur_eqd);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_cmd_eq_create "
				    "Failed for EQ\n");
			goto create_eq_error;
		}

		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : eqid = %d\n",
			    phwi_context->be_eq[i].q.id);
	}
	return 0;
create_eq_error:
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
		eq = &phwi_context->be_eq[i].q;
		mem = &eq->dma_mem;
		if (mem->va)
			pci_free_consistent(phba->pcidev, num_eq_pages
					    * PAGE_SIZE,
					    mem->va, mem->dma);
	}
	return ret;
}

static int beiscsi_create_cqs(struct beiscsi_hba *phba,
			      struct hwi_context_memory *phwi_context)
{
	unsigned int i, num_cq_pages;
	int ret = 0;
	struct be_queue_info *cq, *eq;
	struct be_dma_mem *mem;
	struct be_eq_obj *pbe_eq;
	void *cq_vaddress;
	dma_addr_t paddr;

	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
				      sizeof(struct sol_cqe));

	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		eq = &phwi_context->be_eq[i].q;
		pbe_eq = &phwi_context->be_eq[i];
		pbe_eq->cq = cq;
		pbe_eq->phba = phba;
		mem = &cq->dma_mem;
		cq_vaddress = pci_alloc_consistent(phba->pcidev,
						   num_cq_pages * PAGE_SIZE,
						   &paddr);
		if (!cq_vaddress)
			goto create_cq_error;
		ret = be_fill_queue(cq, phba->params.num_cq_entries,
				    sizeof(struct sol_cqe), cq_vaddress);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : be_fill_queue Failed "
				    "for ISCSI CQ\n");
			goto create_cq_error;
		}

		mem->dma = paddr;
		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
					    false, 0);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_cmd_cq_create "
				    "Failed for ISCSI CQ\n");
			goto create_cq_error;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : iscsi cq_id is %d for eq_id %d\n"
			    "iSCSI CQ CREATED\n", cq->id, eq->id);
	}
	return 0;

create_cq_error:
	for (i = 0; i < phba->num_cpus; i++) {
		cq = &phwi_context->be_cq[i];
		mem = &cq->dma_mem;
		if (mem->va)
			pci_free_consistent(phba->pcidev, num_cq_pages
					    * PAGE_SIZE,
					    mem->va, mem->dma);
	}
	return ret;
}

static int
beiscsi_create_def_hdr(struct beiscsi_hba *phba,
		       struct hwi_context_memory *phwi_context,
		       struct hwi_controller *phwi_ctrlr,
		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
{
	unsigned int idx;
	int ret;
	struct be_queue_info *dq, *cq;
	struct be_dma_mem *mem;
	struct be_mem_descriptor *mem_descr;
	void *dq_vaddress;

	idx = 0;
	dq = &phwi_context->be_def_hdrq[ulp_num];
	cq = &phwi_context->be_cq[0];
	mem = &dq->dma_mem;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
		(ulp_num * MEM_DESCR_OFFSET);
	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
			    sizeof(struct phys_addr),
			    sizeof(struct phys_addr), dq_vaddress);
	if (ret) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
			    ulp_num);

		return ret;
	}
	mem->dma = (unsigned long)mem_descr->mem_array[idx].
3383 bus_address.u.a64.address; 3384 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3385 def_pdu_ring_sz, 3386 phba->params.defpdu_hdr_sz, 3387 BEISCSI_DEFQ_HDR, ulp_num); 3388 if (ret) { 3389 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3390 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3391 ulp_num); 3392 3393 return ret; 3394 } 3395 3396 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3397 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3398 ulp_num, 3399 phwi_context->be_def_hdrq[ulp_num].id); 3400 hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num); 3401 return 0; 3402 } 3403 3404 static int 3405 beiscsi_create_def_data(struct beiscsi_hba *phba, 3406 struct hwi_context_memory *phwi_context, 3407 struct hwi_controller *phwi_ctrlr, 3408 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3409 { 3410 unsigned int idx; 3411 int ret; 3412 struct be_queue_info *dataq, *cq; 3413 struct be_dma_mem *mem; 3414 struct be_mem_descriptor *mem_descr; 3415 void *dq_vaddress; 3416 3417 idx = 0; 3418 dataq = &phwi_context->be_def_dataq[ulp_num]; 3419 cq = &phwi_context->be_cq[0]; 3420 mem = &dataq->dma_mem; 3421 mem_descr = phba->init_mem; 3422 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3423 (ulp_num * MEM_DESCR_OFFSET); 3424 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3425 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3426 sizeof(struct phys_addr), 3427 sizeof(struct phys_addr), dq_vaddress); 3428 if (ret) { 3429 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3430 "BM_%d : be_fill_queue Failed for DEF PDU " 3431 "DATA on ULP : %d\n", 3432 ulp_num); 3433 3434 return ret; 3435 } 3436 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3437 bus_address.u.a64.address; 3438 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3439 def_pdu_ring_sz, 3440 phba->params.defpdu_data_sz, 3441 BEISCSI_DEFQ_DATA, ulp_num); 3442 if (ret) { 3443 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3444 "BM_%d be_cmd_create_default_pdu_queue" 3445 " Failed for DEF PDU DATA on ULP : %d\n", 3446 ulp_num); 3447 return ret; 3448 } 3449 3450 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3451 "BM_%d : iscsi def data id on ULP : %d is %d\n", 3452 ulp_num, 3453 phwi_context->be_def_dataq[ulp_num].id); 3454 3455 hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num); 3456 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3457 "BM_%d : DEFAULT PDU DATA RING CREATED" 3458 "on ULP : %d\n", ulp_num); 3459 3460 return 0; 3461 } 3462 3463 3464 static int 3465 beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3466 { 3467 struct be_mem_descriptor *mem_descr; 3468 struct mem_array *pm_arr; 3469 struct be_dma_mem sgl; 3470 int status, ulp_num; 3471 3472 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3473 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3474 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3475 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3476 (ulp_num * MEM_DESCR_OFFSET); 3477 pm_arr = mem_descr->mem_array; 3478 3479 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3480 status = be_cmd_iscsi_post_template_hdr( 3481 &phba->ctrl, &sgl); 3482 3483 if (status != 0) { 3484 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3485 "BM_%d : Post Template HDR Failed for" 3486 "ULP_%d\n", ulp_num); 3487 return status; 3488 } 3489 3490 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3491 "BM_%d : Template HDR Pages Posted for" 3492 "ULP_%d\n", ulp_num); 3493 } 3494 } 3495 return 0; 3496 } 3497 3498 static int 3499 beiscsi_post_pages(struct 
beiscsi_hba *phba) 3500 { 3501 struct be_mem_descriptor *mem_descr; 3502 struct mem_array *pm_arr; 3503 unsigned int page_offset, i; 3504 struct be_dma_mem sgl; 3505 int status, ulp_num = 0; 3506 3507 mem_descr = phba->init_mem; 3508 mem_descr += HWI_MEM_SGE; 3509 pm_arr = mem_descr->mem_array; 3510 3511 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3512 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3513 break; 3514 3515 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3516 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3517 for (i = 0; i < mem_descr->num_elements; i++) { 3518 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3519 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3520 page_offset, 3521 (pm_arr->size / PAGE_SIZE)); 3522 page_offset += pm_arr->size / PAGE_SIZE; 3523 if (status != 0) { 3524 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3525 "BM_%d : post sgl failed.\n"); 3526 return status; 3527 } 3528 pm_arr++; 3529 } 3530 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3531 "BM_%d : POSTED PAGES\n"); 3532 return 0; 3533 } 3534 3535 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3536 { 3537 struct be_dma_mem *mem = &q->dma_mem; 3538 if (mem->va) { 3539 pci_free_consistent(phba->pcidev, mem->size, 3540 mem->va, mem->dma); 3541 mem->va = NULL; 3542 } 3543 } 3544 3545 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3546 u16 len, u16 entry_size) 3547 { 3548 struct be_dma_mem *mem = &q->dma_mem; 3549 3550 memset(q, 0, sizeof(*q)); 3551 q->len = len; 3552 q->entry_size = entry_size; 3553 mem->size = len * entry_size; 3554 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); 3555 if (!mem->va) 3556 return -ENOMEM; 3557 return 0; 3558 } 3559 3560 static int 3561 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3562 struct hwi_context_memory *phwi_context, 3563 struct hwi_controller *phwi_ctrlr) 3564 { 3565 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 3566 u64 pa_addr_lo; 3567 unsigned int idx, num, i, ulp_num; 3568 struct mem_array *pwrb_arr; 3569 void *wrb_vaddr; 3570 struct be_dma_mem sgl; 3571 struct be_mem_descriptor *mem_descr; 3572 struct hwi_wrb_context *pwrb_context; 3573 int status; 3574 uint8_t ulp_count = 0, ulp_base_num = 0; 3575 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3576 3577 idx = 0; 3578 mem_descr = phba->init_mem; 3579 mem_descr += HWI_MEM_WRB; 3580 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl, 3581 GFP_KERNEL); 3582 if (!pwrb_arr) { 3583 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3584 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3585 return -ENOMEM; 3586 } 3587 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3588 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3589 num_wrb_rings = mem_descr->mem_array[idx].size / 3590 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3591 3592 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3593 if (num_wrb_rings) { 3594 pwrb_arr[num].virtual_address = wrb_vaddr; 3595 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3596 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3597 sizeof(struct iscsi_wrb); 3598 wrb_vaddr += pwrb_arr[num].size; 3599 pa_addr_lo += pwrb_arr[num].size; 3600 num_wrb_rings--; 3601 } else { 3602 idx++; 3603 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3604 pa_addr_lo = mem_descr->mem_array[idx].\ 3605 bus_address.u.a64.address; 3606 num_wrb_rings = mem_descr->mem_array[idx].size 
/ 3607 (phba->params.wrbs_per_cxn * 3608 sizeof(struct iscsi_wrb)); 3609 pwrb_arr[num].virtual_address = wrb_vaddr; 3610 pwrb_arr[num].bus_address.u.a64.address\ 3611 = pa_addr_lo; 3612 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3613 sizeof(struct iscsi_wrb); 3614 wrb_vaddr += pwrb_arr[num].size; 3615 pa_addr_lo += pwrb_arr[num].size; 3616 num_wrb_rings--; 3617 } 3618 } 3619 3620 /* Get the ULP Count */ 3621 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3622 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3623 ulp_count++; 3624 ulp_base_num = ulp_num; 3625 cid_count_ulp[ulp_num] = 3626 BEISCSI_GET_CID_COUNT(phba, ulp_num); 3627 } 3628 3629 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3630 wrb_mem_index = 0; 3631 offset = 0; 3632 size = 0; 3633 3634 if (ulp_count > 1) { 3635 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3636 3637 if (!cid_count_ulp[ulp_base_num]) 3638 ulp_base_num = (ulp_base_num + 1) % 3639 BEISCSI_ULP_COUNT; 3640 3641 cid_count_ulp[ulp_base_num]--; 3642 } 3643 3644 3645 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3646 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3647 &phwi_context->be_wrbq[i], 3648 &phwi_ctrlr->wrb_context[i], 3649 ulp_base_num); 3650 if (status != 0) { 3651 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3652 "BM_%d : wrbq create failed."); 3653 kfree(pwrb_arr); 3654 return status; 3655 } 3656 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3657 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3658 } 3659 kfree(pwrb_arr); 3660 return 0; 3661 } 3662 3663 static void free_wrb_handles(struct beiscsi_hba *phba) 3664 { 3665 unsigned int index; 3666 struct hwi_controller *phwi_ctrlr; 3667 struct hwi_wrb_context *pwrb_context; 3668 3669 phwi_ctrlr = phba->phwi_ctrlr; 3670 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 3671 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3672 kfree(pwrb_context->pwrb_handle_base); 3673 kfree(pwrb_context->pwrb_handle_basestd); 3674 } 3675 } 3676 3677 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3678 { 3679 struct be_queue_info *q; 3680 struct be_ctrl_info *ctrl = &phba->ctrl; 3681 3682 q = &phba->ctrl.mcc_obj.q; 3683 if (q->created) 3684 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3685 be_queue_free(phba, q); 3686 3687 q = &phba->ctrl.mcc_obj.cq; 3688 if (q->created) 3689 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3690 be_queue_free(phba, q); 3691 } 3692 3693 static void hwi_cleanup(struct beiscsi_hba *phba) 3694 { 3695 struct be_queue_info *q; 3696 struct be_ctrl_info *ctrl = &phba->ctrl; 3697 struct hwi_controller *phwi_ctrlr; 3698 struct hwi_context_memory *phwi_context; 3699 struct hwi_async_pdu_context *pasync_ctx; 3700 int i, eq_for_mcc, ulp_num; 3701 3702 phwi_ctrlr = phba->phwi_ctrlr; 3703 phwi_context = phwi_ctrlr->phwi_ctxt; 3704 3705 be_cmd_iscsi_remove_template_hdr(ctrl); 3706 3707 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3708 q = &phwi_context->be_wrbq[i]; 3709 if (q->created) 3710 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3711 } 3712 kfree(phwi_context->be_wrbq); 3713 free_wrb_handles(phba); 3714 3715 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3716 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3717 3718 q = &phwi_context->be_def_hdrq[ulp_num]; 3719 if (q->created) 3720 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3721 3722 q = &phwi_context->be_def_dataq[ulp_num]; 3723 if (q->created) 3724 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3725 3726 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 3727 } 
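		/* the async PDU context itself was carved out of
		 * phba->init_mem, so it is released along with init_mem
		 * in beiscsi_free_mem()
		 */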
3728 } 3729 3730 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3731 3732 for (i = 0; i < (phba->num_cpus); i++) { 3733 q = &phwi_context->be_cq[i]; 3734 if (q->created) 3735 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3736 } 3737 3738 be_mcc_queues_destroy(phba); 3739 if (phba->msix_enabled) 3740 eq_for_mcc = 1; 3741 else 3742 eq_for_mcc = 0; 3743 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3744 q = &phwi_context->be_eq[i].q; 3745 if (q->created) 3746 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3747 } 3748 be_cmd_fw_uninit(ctrl); 3749 } 3750 3751 static int be_mcc_queues_create(struct beiscsi_hba *phba, 3752 struct hwi_context_memory *phwi_context) 3753 { 3754 struct be_queue_info *q, *cq; 3755 struct be_ctrl_info *ctrl = &phba->ctrl; 3756 3757 /* Alloc MCC compl queue */ 3758 cq = &phba->ctrl.mcc_obj.cq; 3759 if (be_queue_alloc(phba, cq, MCC_CQ_LEN, 3760 sizeof(struct be_mcc_compl))) 3761 goto err; 3762 /* Ask BE to create MCC compl queue; */ 3763 if (phba->msix_enabled) { 3764 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq 3765 [phba->num_cpus].q, false, true, 0)) 3766 goto mcc_cq_free; 3767 } else { 3768 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3769 false, true, 0)) 3770 goto mcc_cq_free; 3771 } 3772 3773 /* Alloc MCC queue */ 3774 q = &phba->ctrl.mcc_obj.q; 3775 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) 3776 goto mcc_cq_destroy; 3777 3778 /* Ask BE to create MCC queue */ 3779 if (beiscsi_cmd_mccq_create(phba, q, cq)) 3780 goto mcc_q_free; 3781 3782 return 0; 3783 3784 mcc_q_free: 3785 be_queue_free(phba, q); 3786 mcc_cq_destroy: 3787 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); 3788 mcc_cq_free: 3789 be_queue_free(phba, cq); 3790 err: 3791 return -ENOMEM; 3792 } 3793 3794 /** 3795 * find_num_cpus()- Get the CPU online count 3796 * @phba: ptr to priv structure 3797 * 3798 * CPU count is used for creating EQ. 3799 **/ 3800 static void find_num_cpus(struct beiscsi_hba *phba) 3801 { 3802 int num_cpus = 0; 3803 3804 num_cpus = num_online_cpus(); 3805 3806 switch (phba->generation) { 3807 case BE_GEN2: 3808 case BE_GEN3: 3809 phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ? 3810 BEISCSI_MAX_NUM_CPUS : num_cpus; 3811 break; 3812 case BE_GEN4: 3813 /* 3814 * If eqid_count == 1 fall back to 3815 * INTX mechanism 3816 **/ 3817 if (phba->fw_config.eqid_count == 1) { 3818 enable_msix = 0; 3819 phba->num_cpus = 1; 3820 return; 3821 } 3822 3823 phba->num_cpus = 3824 (num_cpus > (phba->fw_config.eqid_count - 1)) ? 
3825 (phba->fw_config.eqid_count - 1) : num_cpus; 3826 break; 3827 default: 3828 phba->num_cpus = 1; 3829 } 3830 } 3831 3832 static int hwi_init_port(struct beiscsi_hba *phba) 3833 { 3834 struct hwi_controller *phwi_ctrlr; 3835 struct hwi_context_memory *phwi_context; 3836 unsigned int def_pdu_ring_sz; 3837 struct be_ctrl_info *ctrl = &phba->ctrl; 3838 int status, ulp_num; 3839 3840 phwi_ctrlr = phba->phwi_ctrlr; 3841 phwi_context = phwi_ctrlr->phwi_ctxt; 3842 phwi_context->max_eqd = 128; 3843 phwi_context->min_eqd = 0; 3844 phwi_context->cur_eqd = 0; 3845 be_cmd_fw_initialize(&phba->ctrl); 3846 3847 status = beiscsi_create_eqs(phba, phwi_context); 3848 if (status != 0) { 3849 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3850 "BM_%d : EQ not created\n"); 3851 goto error; 3852 } 3853 3854 status = be_mcc_queues_create(phba, phwi_context); 3855 if (status != 0) 3856 goto error; 3857 3858 status = mgmt_check_supported_fw(ctrl, phba); 3859 if (status != 0) { 3860 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3861 "BM_%d : Unsupported fw version\n"); 3862 goto error; 3863 } 3864 3865 status = beiscsi_create_cqs(phba, phwi_context); 3866 if (status != 0) { 3867 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3868 "BM_%d : CQ not created\n"); 3869 goto error; 3870 } 3871 3872 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3873 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3874 3875 def_pdu_ring_sz = 3876 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 3877 sizeof(struct phys_addr); 3878 3879 status = beiscsi_create_def_hdr(phba, phwi_context, 3880 phwi_ctrlr, 3881 def_pdu_ring_sz, 3882 ulp_num); 3883 if (status != 0) { 3884 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3885 "BM_%d : Default Header not created for ULP : %d\n", 3886 ulp_num); 3887 goto error; 3888 } 3889 3890 status = beiscsi_create_def_data(phba, phwi_context, 3891 phwi_ctrlr, 3892 def_pdu_ring_sz, 3893 ulp_num); 3894 if (status != 0) { 3895 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3896 "BM_%d : Default Data not created for ULP : %d\n", 3897 ulp_num); 3898 goto error; 3899 } 3900 } 3901 } 3902 3903 status = beiscsi_post_pages(phba); 3904 if (status != 0) { 3905 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3906 "BM_%d : Post SGL Pages Failed\n"); 3907 goto error; 3908 } 3909 3910 status = beiscsi_post_template_hdr(phba); 3911 if (status != 0) { 3912 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3913 "BM_%d : Template HDR Posting for CXN Failed\n"); 3914 } 3915 3916 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3917 if (status != 0) { 3918 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3919 "BM_%d : WRB Rings not created\n"); 3920 goto error; 3921 } 3922 3923 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3924 uint16_t async_arr_idx = 0; 3925 3926 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3927 uint16_t cri = 0; 3928 struct hwi_async_pdu_context *pasync_ctx; 3929 3930 pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3931 phwi_ctrlr, ulp_num); 3932 for (cri = 0; cri < 3933 phba->params.cxns_per_ctrl; cri++) { 3934 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI 3935 (phwi_ctrlr, cri)) 3936 pasync_ctx->cid_to_async_cri_map[ 3937 phwi_ctrlr->wrb_context[cri].cid] = 3938 async_arr_idx++; 3939 } 3940 } 3941 } 3942 3943 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3944 "BM_%d : hwi_init_port success\n"); 3945 return 0; 3946 3947 error: 3948 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3949 "BM_%d : hwi_init_port failed"); 3950 hwi_cleanup(phba); 3951 return status; 3952 } 3953 3954 static 
int hwi_init_controller(struct beiscsi_hba *phba) 3955 { 3956 struct hwi_controller *phwi_ctrlr; 3957 3958 phwi_ctrlr = phba->phwi_ctrlr; 3959 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3960 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3961 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3962 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3963 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3964 phwi_ctrlr->phwi_ctxt); 3965 } else { 3966 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3967 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3968 "than one element. Failing to load\n"); 3969 return -ENOMEM; 3970 } 3971 3972 iscsi_init_global_templates(phba); 3973 if (beiscsi_init_wrb_handle(phba)) 3974 return -ENOMEM; 3975 3976 if (hwi_init_async_pdu_ctx(phba)) { 3977 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3978 "BM_%d : hwi_init_async_pdu_ctx failed\n"); 3979 return -ENOMEM; 3980 } 3981 3982 if (hwi_init_port(phba) != 0) { 3983 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3984 "BM_%d : hwi_init_controller failed\n"); 3985 3986 return -ENOMEM; 3987 } 3988 return 0; 3989 } 3990 3991 static void beiscsi_free_mem(struct beiscsi_hba *phba) 3992 { 3993 struct be_mem_descriptor *mem_descr; 3994 int i, j; 3995 3996 mem_descr = phba->init_mem; 3997 i = 0; 3998 j = 0; 3999 for (i = 0; i < SE_MEM_MAX; i++) { 4000 for (j = mem_descr->num_elements; j > 0; j--) { 4001 pci_free_consistent(phba->pcidev, 4002 mem_descr->mem_array[j - 1].size, 4003 mem_descr->mem_array[j - 1].virtual_address, 4004 (unsigned long)mem_descr->mem_array[j - 1]. 4005 bus_address.u.a64.address); 4006 } 4007 4008 kfree(mem_descr->mem_array); 4009 mem_descr++; 4010 } 4011 kfree(phba->init_mem); 4012 kfree(phba->phwi_ctrlr->wrb_context); 4013 kfree(phba->phwi_ctrlr); 4014 } 4015 4016 static int beiscsi_init_controller(struct beiscsi_hba *phba) 4017 { 4018 int ret = -ENOMEM; 4019 4020 ret = beiscsi_get_memory(phba); 4021 if (ret < 0) { 4022 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4023 "BM_%d : beiscsi_dev_probe - " 4024 "Failed in beiscsi_get_memory\n"); 4025 return ret; 4026 } 4027 4028 ret = hwi_init_controller(phba); 4029 if (ret) 4030 goto free_init; 4031 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4032 "BM_%d : Return success from beiscsi_init_controller\n"); 4033 4034 return 0; 4035 4036 free_init: 4037 beiscsi_free_mem(phba); 4038 return ret; 4039 } 4040 4041 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 4042 { 4043 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 4044 struct sgl_handle *psgl_handle; 4045 struct iscsi_sge *pfrag; 4046 unsigned int arr_index, i, idx; 4047 unsigned int ulp_icd_start, ulp_num = 0; 4048 4049 phba->io_sgl_hndl_avbl = 0; 4050 phba->eh_sgl_hndl_avbl = 0; 4051 4052 mem_descr_sglh = phba->init_mem; 4053 mem_descr_sglh += HWI_MEM_SGLH; 4054 if (1 == mem_descr_sglh->num_elements) { 4055 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 4056 phba->params.ios_per_ctrl, 4057 GFP_KERNEL); 4058 if (!phba->io_sgl_hndl_base) { 4059 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4060 "BM_%d : Mem Alloc Failed. Failing to load\n"); 4061 return -ENOMEM; 4062 } 4063 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 4064 (phba->params.icds_per_ctrl - 4065 phba->params.ios_per_ctrl), 4066 GFP_KERNEL); 4067 if (!phba->eh_sgl_hndl_base) { 4068 kfree(phba->io_sgl_hndl_base); 4069 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4070 "BM_%d : Mem Alloc Failed. 
Failing to load\n"); 4071 return -ENOMEM; 4072 } 4073 } else { 4074 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4075 "BM_%d : HWI_MEM_SGLH is more than one element." 4076 "Failing to load\n"); 4077 return -ENOMEM; 4078 } 4079 4080 arr_index = 0; 4081 idx = 0; 4082 while (idx < mem_descr_sglh->num_elements) { 4083 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 4084 4085 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 4086 sizeof(struct sgl_handle)); i++) { 4087 if (arr_index < phba->params.ios_per_ctrl) { 4088 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 4089 phba->io_sgl_hndl_avbl++; 4090 arr_index++; 4091 } else { 4092 phba->eh_sgl_hndl_base[arr_index - 4093 phba->params.ios_per_ctrl] = 4094 psgl_handle; 4095 arr_index++; 4096 phba->eh_sgl_hndl_avbl++; 4097 } 4098 psgl_handle++; 4099 } 4100 idx++; 4101 } 4102 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4103 "BM_%d : phba->io_sgl_hndl_avbl=%d" 4104 "phba->eh_sgl_hndl_avbl=%d\n", 4105 phba->io_sgl_hndl_avbl, 4106 phba->eh_sgl_hndl_avbl); 4107 4108 mem_descr_sg = phba->init_mem; 4109 mem_descr_sg += HWI_MEM_SGE; 4110 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4111 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 4112 mem_descr_sg->num_elements); 4113 4114 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 4115 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 4116 break; 4117 4118 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 4119 4120 arr_index = 0; 4121 idx = 0; 4122 while (idx < mem_descr_sg->num_elements) { 4123 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 4124 4125 for (i = 0; 4126 i < (mem_descr_sg->mem_array[idx].size) / 4127 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 4128 i++) { 4129 if (arr_index < phba->params.ios_per_ctrl) 4130 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 4131 else 4132 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 4133 phba->params.ios_per_ctrl]; 4134 psgl_handle->pfrag = pfrag; 4135 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 4136 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 4137 pfrag += phba->params.num_sge_per_io; 4138 psgl_handle->sgl_index = ulp_icd_start + arr_index++; 4139 } 4140 idx++; 4141 } 4142 phba->io_sgl_free_index = 0; 4143 phba->io_sgl_alloc_index = 0; 4144 phba->eh_sgl_free_index = 0; 4145 phba->eh_sgl_alloc_index = 0; 4146 return 0; 4147 } 4148 4149 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 4150 { 4151 int ret; 4152 uint16_t i, ulp_num; 4153 struct ulp_cid_info *ptr_cid_info = NULL; 4154 4155 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4156 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4157 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), 4158 GFP_KERNEL); 4159 4160 if (!ptr_cid_info) { 4161 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4162 "BM_%d : Failed to allocate memory" 4163 "for ULP_CID_INFO for ULP : %d\n", 4164 ulp_num); 4165 ret = -ENOMEM; 4166 goto free_memory; 4167 4168 } 4169 4170 /* Allocate memory for CID array */ 4171 ptr_cid_info->cid_array = kzalloc(sizeof(void *) * 4172 BEISCSI_GET_CID_COUNT(phba, 4173 ulp_num), GFP_KERNEL); 4174 if (!ptr_cid_info->cid_array) { 4175 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4176 "BM_%d : Failed to allocate memory" 4177 "for CID_ARRAY for ULP : %d\n", 4178 ulp_num); 4179 kfree(ptr_cid_info); 4180 ptr_cid_info = NULL; 4181 ret = -ENOMEM; 4182 4183 goto free_memory; 4184 } 4185 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( 4186 phba, ulp_num); 4187 4188 /* Save the 
cid_info_array ptr */ 4189 phba->cid_array_info[ulp_num] = ptr_cid_info; 4190 } 4191 } 4192 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 4193 phba->params.cxns_per_ctrl, GFP_KERNEL); 4194 if (!phba->ep_array) { 4195 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4196 "BM_%d : Failed to allocate memory in " 4197 "hba_setup_cid_tbls\n"); 4198 ret = -ENOMEM; 4199 4200 goto free_memory; 4201 } 4202 4203 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * 4204 phba->params.cxns_per_ctrl, GFP_KERNEL); 4205 if (!phba->conn_table) { 4206 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4207 "BM_%d : Failed to allocate memory in " 4208 "hba_setup_cid_tbls\n"); 4209 4210 kfree(phba->ep_array); 4211 phba->ep_array = NULL; 4212 ret = -ENOMEM; 4213 4214 goto free_memory; 4215 } 4216 4217 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4218 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4219 4220 ptr_cid_info = phba->cid_array_info[ulp_num]; 4221 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 4222 phba->phwi_ctrlr->wrb_context[i].cid; 4223 4224 } 4225 4226 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4227 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4228 ptr_cid_info = phba->cid_array_info[ulp_num]; 4229 4230 ptr_cid_info->cid_alloc = 0; 4231 ptr_cid_info->cid_free = 0; 4232 } 4233 } 4234 return 0; 4235 4236 free_memory: 4237 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4238 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4239 ptr_cid_info = phba->cid_array_info[ulp_num]; 4240 4241 if (ptr_cid_info) { 4242 kfree(ptr_cid_info->cid_array); 4243 kfree(ptr_cid_info); 4244 phba->cid_array_info[ulp_num] = NULL; 4245 } 4246 } 4247 } 4248 4249 return ret; 4250 } 4251 4252 static void hwi_enable_intr(struct beiscsi_hba *phba) 4253 { 4254 struct be_ctrl_info *ctrl = &phba->ctrl; 4255 struct hwi_controller *phwi_ctrlr; 4256 struct hwi_context_memory *phwi_context; 4257 struct be_queue_info *eq; 4258 u8 __iomem *addr; 4259 u32 reg, i; 4260 u32 enabled; 4261 4262 phwi_ctrlr = phba->phwi_ctrlr; 4263 phwi_context = phwi_ctrlr->phwi_ctxt; 4264 4265 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 4266 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 4267 reg = ioread32(addr); 4268 4269 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4270 if (!enabled) { 4271 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4272 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4273 "BM_%d : reg = 0x%08x addr=%p\n", reg, addr); 4274 iowrite32(reg, addr); 4275 } 4276 4277 if (!phba->msix_enabled) { 4278 eq = &phwi_context->be_eq[0].q; 4279 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4280 "BM_%d : eq->id=%d\n", eq->id); 4281 4282 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4283 } else { 4284 for (i = 0; i <= phba->num_cpus; i++) { 4285 eq = &phwi_context->be_eq[i].q; 4286 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4287 "BM_%d : eq->id=%d\n", eq->id); 4288 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4289 } 4290 } 4291 } 4292 4293 static void hwi_disable_intr(struct beiscsi_hba *phba) 4294 { 4295 struct be_ctrl_info *ctrl = &phba->ctrl; 4296 4297 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 4298 u32 reg = ioread32(addr); 4299 4300 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4301 if (enabled) { 4302 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4303 iowrite32(reg, addr); 4304 } else 4305 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4306 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 4307 } 4308 4309 /** 4310 * 
beiscsi_get_boot_info()- Get the boot session info 4311 * @phba: The device priv structure instance 4312 * 4313 * Get the boot target info and store in driver priv structure 4314 * 4315 * return values 4316 * Success: 0 4317 * Failure: Non-Zero Value 4318 **/ 4319 static int beiscsi_get_boot_info(struct beiscsi_hba *phba) 4320 { 4321 struct be_cmd_get_session_resp *session_resp; 4322 struct be_dma_mem nonemb_cmd; 4323 unsigned int tag; 4324 unsigned int s_handle; 4325 int ret = -ENOMEM; 4326 4327 /* Get the session handle of the boot target */ 4328 ret = be_mgmt_get_boot_shandle(phba, &s_handle); 4329 if (ret) { 4330 beiscsi_log(phba, KERN_ERR, 4331 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4332 "BM_%d : No boot session\n"); 4333 return ret; 4334 } 4335 nonemb_cmd.size = sizeof(*session_resp); nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, 4336 nonemb_cmd.size, 4337 &nonemb_cmd.dma); 4338 if (nonemb_cmd.va == NULL) { 4339 beiscsi_log(phba, KERN_ERR, 4340 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4341 "BM_%d : Failed to allocate memory for " 4342 "beiscsi_get_session_info\n"); 4343 4344 return -ENOMEM; 4345 } 4346 4347 tag = mgmt_get_session_info(phba, s_handle, 4348 &nonemb_cmd); 4349 if (!tag) { 4350 beiscsi_log(phba, KERN_ERR, 4351 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4352 "BM_%d : beiscsi_get_session_info" 4353 " Failed\n"); 4354 4355 goto boot_freemem; 4356 } 4357 4358 ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); 4359 if (ret) { 4360 beiscsi_log(phba, KERN_ERR, 4361 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4362 "BM_%d : beiscsi_get_session_info Failed\n"); 4363 4364 if (ret != -EBUSY) 4365 goto boot_freemem; 4366 else 4367 return ret; 4368 } 4369 4370 session_resp = nonemb_cmd.va; 4371 4372 memcpy(&phba->boot_sess, &session_resp->session_info, 4373 sizeof(struct mgmt_session_info)); 4374 ret = 0; 4375 4376 boot_freemem: 4377 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4378 nonemb_cmd.va, nonemb_cmd.dma); 4379 return ret; 4380 } 4381 4382 static void beiscsi_boot_release(void *data) 4383 { 4384 struct beiscsi_hba *phba = data; 4385 4386 scsi_host_put(phba->shost); 4387 } 4388 4389 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) 4390 { 4391 struct iscsi_boot_kobj *boot_kobj; 4392 4393 /* it has been created previously */ 4394 if (phba->boot_kset) 4395 return 0; 4396 4397 /* get boot info using mgmt cmd */ 4398 if (beiscsi_get_boot_info(phba)) 4399 /* Try to see if we can carry on without this */ 4400 return 0; 4401 4402 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 4403 if (!phba->boot_kset) 4404 return -ENOMEM; 4405 4406 /* get a ref because the show function will ref the phba */ 4407 if (!scsi_host_get(phba->shost)) 4408 goto free_kset; 4409 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba, 4410 beiscsi_show_boot_tgt_info, 4411 beiscsi_tgt_get_attr_visibility, 4412 beiscsi_boot_release); 4413 if (!boot_kobj) 4414 goto put_shost; 4415 4416 if (!scsi_host_get(phba->shost)) 4417 goto free_kset; 4418 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba, 4419 beiscsi_show_boot_ini_info, 4420 beiscsi_ini_get_attr_visibility, 4421 beiscsi_boot_release); 4422 if (!boot_kobj) 4423 goto put_shost; 4424 4425 if (!scsi_host_get(phba->shost)) 4426 goto free_kset; 4427 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba, 4428 beiscsi_show_boot_eth_info, 4429 beiscsi_eth_get_attr_visibility, 4430 beiscsi_boot_release); 4431 if (!boot_kobj) 4432 goto put_shost; 4433 return 0; 4434 4435 put_shost: 4436 scsi_host_put(phba->shost); 4437 
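/*
 * Unwind note: put_shost above drops only the reference taken for the
 * kobject that failed to be created; references taken for kobjects
 * that were created successfully are dropped via beiscsi_boot_release()
 * when the kset is destroyed below.
 */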
free_kset: 4438 iscsi_boot_destroy_kset(phba->boot_kset); 4439 return -ENOMEM; 4440 } 4441 4442 static int beiscsi_init_port(struct beiscsi_hba *phba) 4443 { 4444 int ret; 4445 4446 ret = beiscsi_init_controller(phba); 4447 if (ret < 0) { 4448 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4449 "BM_%d : beiscsi_dev_probe - Failed in " 4450 "beiscsi_init_controller\n"); 4451 return ret; 4452 } 4453 ret = beiscsi_init_sgl_handle(phba); 4454 if (ret < 0) { 4455 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4456 "BM_%d : beiscsi_dev_probe - Failed in " 4457 "beiscsi_init_sgl_handle\n"); 4458 goto do_cleanup_ctrlr; 4459 } 4460 4461 if (hba_setup_cid_tbls(phba)) { 4462 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4463 "BM_%d : Failed in hba_setup_cid_tbls\n"); 4464 kfree(phba->io_sgl_hndl_base); 4465 kfree(phba->eh_sgl_hndl_base); 4466 goto do_cleanup_ctrlr; 4467 } 4468 4469 return ret; 4470 4471 do_cleanup_ctrlr: 4472 hwi_cleanup(phba); 4473 return ret; 4474 } 4475 4476 static void hwi_purge_eq(struct beiscsi_hba *phba) 4477 { 4478 struct hwi_controller *phwi_ctrlr; 4479 struct hwi_context_memory *phwi_context; 4480 struct be_queue_info *eq; 4481 struct be_eq_entry *eqe = NULL; 4482 int i, eq_msix; 4483 unsigned int num_processed; 4484 4485 phwi_ctrlr = phba->phwi_ctrlr; 4486 phwi_context = phwi_ctrlr->phwi_ctxt; 4487 if (phba->msix_enabled) 4488 eq_msix = 1; 4489 else 4490 eq_msix = 0; 4491 4492 for (i = 0; i < (phba->num_cpus + eq_msix); i++) { 4493 eq = &phwi_context->be_eq[i].q; 4494 eqe = queue_tail_node(eq); 4495 num_processed = 0; 4496 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 4497 & EQE_VALID_MASK) { 4498 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 4499 queue_tail_inc(eq); 4500 eqe = queue_tail_node(eq); 4501 num_processed++; 4502 } 4503 4504 if (num_processed) 4505 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); 4506 } 4507 } 4508 4509 static void beiscsi_clean_port(struct beiscsi_hba *phba) 4510 { 4511 int mgmt_status, ulp_num; 4512 struct ulp_cid_info *ptr_cid_info = NULL; 4513 4514 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4515 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4516 mgmt_status = mgmt_epfw_cleanup(phba, ulp_num); 4517 if (mgmt_status) 4518 beiscsi_log(phba, KERN_WARNING, 4519 BEISCSI_LOG_INIT, 4520 "BM_%d : mgmt_epfw_cleanup FAILED" 4521 " for ULP_%d\n", ulp_num); 4522 } 4523 } 4524 4525 hwi_purge_eq(phba); 4526 hwi_cleanup(phba); 4527 kfree(phba->io_sgl_hndl_base); 4528 kfree(phba->eh_sgl_hndl_base); 4529 kfree(phba->ep_array); 4530 kfree(phba->conn_table); 4531 4532 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4533 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4534 ptr_cid_info = phba->cid_array_info[ulp_num]; 4535 4536 if (ptr_cid_info) { 4537 kfree(ptr_cid_info->cid_array); 4538 kfree(ptr_cid_info); 4539 phba->cid_array_info[ulp_num] = NULL; 4540 } 4541 } 4542 } 4543 4544 } 4545 4546 /** 4547 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources 4548 * @beiscsi_conn: ptr to the conn to be cleaned up 4549 * @task: ptr to iscsi_task resource to be freed. 4550 * 4551 * Free driver mgmt resources bound to CXN. 
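 *
 * The WRB is zeroed before being returned to the pool and any single
 * DMA mapping made for the mgmt payload is unmapped, so an aborted
 * mgmt task cannot leave a stale descriptor or mapping behind.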
4552 **/ 4553 void 4554 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4555 struct iscsi_task *task) 4556 { 4557 struct beiscsi_io_task *io_task; 4558 struct beiscsi_hba *phba = beiscsi_conn->phba; 4559 struct hwi_wrb_context *pwrb_context; 4560 struct hwi_controller *phwi_ctrlr; 4561 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4562 beiscsi_conn->beiscsi_conn_cid); 4563 4564 phwi_ctrlr = phba->phwi_ctrlr; 4565 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4566 4567 io_task = task->dd_data; 4568 4569 if (io_task->pwrb_handle) { 4570 memset(io_task->pwrb_handle->pwrb, 0, 4571 sizeof(struct iscsi_wrb)); 4572 free_wrb_handle(phba, pwrb_context, 4573 io_task->pwrb_handle); 4574 io_task->pwrb_handle = NULL; 4575 } 4576 4577 if (io_task->psgl_handle) { 4578 spin_lock_bh(&phba->mgmt_sgl_lock); 4579 free_mgmt_sgl_handle(phba, 4580 io_task->psgl_handle); 4581 io_task->psgl_handle = NULL; 4582 spin_unlock_bh(&phba->mgmt_sgl_lock); 4583 } 4584 4585 if (io_task->mtask_addr) 4586 pci_unmap_single(phba->pcidev, 4587 io_task->mtask_addr, 4588 io_task->mtask_data_count, 4589 PCI_DMA_TODEVICE); 4590 } 4591 4592 /** 4593 * beiscsi_cleanup_task()- Free driver resources of the task 4594 * @task: ptr to the iscsi task 4595 * 4596 **/ 4597 static void beiscsi_cleanup_task(struct iscsi_task *task) 4598 { 4599 struct beiscsi_io_task *io_task = task->dd_data; 4600 struct iscsi_conn *conn = task->conn; 4601 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4602 struct beiscsi_hba *phba = beiscsi_conn->phba; 4603 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4604 struct hwi_wrb_context *pwrb_context; 4605 struct hwi_controller *phwi_ctrlr; 4606 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4607 beiscsi_conn->beiscsi_conn_cid); 4608 4609 phwi_ctrlr = phba->phwi_ctrlr; 4610 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4611 4612 if (io_task->cmd_bhs) { 4613 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4614 io_task->bhs_pa.u.a64.address); 4615 io_task->cmd_bhs = NULL; 4616 } 4617 4618 if (task->sc) { 4619 if (io_task->pwrb_handle) { 4620 free_wrb_handle(phba, pwrb_context, 4621 io_task->pwrb_handle); 4622 io_task->pwrb_handle = NULL; 4623 } 4624 4625 if (io_task->psgl_handle) { 4626 spin_lock(&phba->io_sgl_lock); 4627 free_io_sgl_handle(phba, io_task->psgl_handle); 4628 spin_unlock(&phba->io_sgl_lock); 4629 io_task->psgl_handle = NULL; 4630 } 4631 4632 if (io_task->scsi_cmnd) { 4633 scsi_dma_unmap(io_task->scsi_cmnd); 4634 io_task->scsi_cmnd = NULL; 4635 } 4636 } else { 4637 if (!beiscsi_conn->login_in_progress) 4638 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4639 } 4640 } 4641 4642 void 4643 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4644 struct beiscsi_offload_params *params) 4645 { 4646 struct wrb_handle *pwrb_handle; 4647 struct beiscsi_hba *phba = beiscsi_conn->phba; 4648 struct iscsi_task *task = beiscsi_conn->task; 4649 struct iscsi_session *session = task->conn->session; 4650 u32 doorbell = 0; 4651 4652 /* 4653 * We can always use 0 here because it is reserved by libiscsi for 4654 * login/startup related tasks. 
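 * The login task's WRB and SGL handles are released here under the
 * session back_lock, and a fresh WRB is allocated below to post the
 * connection-offload request for this CID.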
4655 */ 4656 beiscsi_conn->login_in_progress = 0; 4657 spin_lock_bh(&session->back_lock); 4658 beiscsi_cleanup_task(task); 4659 spin_unlock_bh(&session->back_lock); 4660 4661 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid); 4662 4663 /* Check for the adapter family */ 4664 if (is_chip_be2_be3r(phba)) 4665 beiscsi_offload_cxn_v0(params, pwrb_handle, 4666 phba->init_mem); 4667 else 4668 beiscsi_offload_cxn_v2(params, pwrb_handle); 4669 4670 be_dws_le_to_cpu(pwrb_handle->pwrb, 4671 sizeof(struct iscsi_target_context_update_wrb)); 4672 4673 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4674 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4675 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4676 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4677 iowrite32(doorbell, phba->db_va + 4678 beiscsi_conn->doorbell_offset); 4679 } 4680 4681 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4682 int *index, int *age) 4683 { 4684 *index = (int)itt; 4685 if (age) 4686 *age = conn->session->age; 4687 } 4688 4689 /** 4690 * beiscsi_alloc_pdu - allocates pdu and related resources 4691 * @task: libiscsi task 4692 * @opcode: opcode of pdu for task 4693 * 4694 * This is called with the session lock held. It will allocate 4695 * the wrb and sgl if needed for the command. And it will prep 4696 * the pdu's itt. beiscsi_parse_pdu will later translate 4697 * the pdu itt to the libiscsi task itt. 4698 */ 4699 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4700 { 4701 struct beiscsi_io_task *io_task = task->dd_data; 4702 struct iscsi_conn *conn = task->conn; 4703 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4704 struct beiscsi_hba *phba = beiscsi_conn->phba; 4705 struct hwi_wrb_context *pwrb_context; 4706 struct hwi_controller *phwi_ctrlr; 4707 itt_t itt; 4708 uint16_t cri_index = 0; 4709 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4710 dma_addr_t paddr; 4711 4712 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, 4713 GFP_ATOMIC, &paddr); 4714 if (!io_task->cmd_bhs) 4715 return -ENOMEM; 4716 io_task->bhs_pa.u.a64.address = paddr; 4717 io_task->libiscsi_itt = (itt_t)task->itt; 4718 io_task->conn = beiscsi_conn; 4719 4720 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4721 task->hdr_max = sizeof(struct be_cmd_bhs); 4722 io_task->psgl_handle = NULL; 4723 io_task->pwrb_handle = NULL; 4724 4725 if (task->sc) { 4726 spin_lock(&phba->io_sgl_lock); 4727 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4728 spin_unlock(&phba->io_sgl_lock); 4729 if (!io_task->psgl_handle) { 4730 beiscsi_log(phba, KERN_ERR, 4731 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4732 "BM_%d : Alloc of IO_SGL_ICD Failed " 4733 "for the CID : %d\n", 4734 beiscsi_conn->beiscsi_conn_cid); 4735 goto free_hndls; 4736 } 4737 io_task->pwrb_handle = alloc_wrb_handle(phba, 4738 beiscsi_conn->beiscsi_conn_cid); 4739 if (!io_task->pwrb_handle) { 4740 beiscsi_log(phba, KERN_ERR, 4741 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4742 "BM_%d : Alloc of WRB_HANDLE Failed " 4743 "for the CID : %d\n", 4744 beiscsi_conn->beiscsi_conn_cid); 4745 goto free_io_hndls; 4746 } 4747 } else { 4748 io_task->scsi_cmnd = NULL; 4749 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4750 beiscsi_conn->task = task; 4751 if (!beiscsi_conn->login_in_progress) { 4752 spin_lock(&phba->mgmt_sgl_lock); 4753 io_task->psgl_handle = (struct sgl_handle *) 4754 alloc_mgmt_sgl_handle(phba); 4755 spin_unlock(&phba->mgmt_sgl_lock); 4756 if (!io_task->psgl_handle) { 4757 
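/* No mgmt SGL handle left for this login; the BHS allocation is unwound via free_hndls. */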
beiscsi_log(phba, KERN_ERR, 4758 BEISCSI_LOG_IO | 4759 BEISCSI_LOG_CONFIG, 4760 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4761 "for the CID : %d\n", 4762 beiscsi_conn-> 4763 beiscsi_conn_cid); 4764 goto free_hndls; 4765 } 4766 4767 beiscsi_conn->login_in_progress = 1; 4768 beiscsi_conn->plogin_sgl_handle = 4769 io_task->psgl_handle; 4770 io_task->pwrb_handle = 4771 alloc_wrb_handle(phba, 4772 beiscsi_conn->beiscsi_conn_cid); 4773 if (!io_task->pwrb_handle) { 4774 beiscsi_log(phba, KERN_ERR, 4775 BEISCSI_LOG_IO | 4776 BEISCSI_LOG_CONFIG, 4777 "BM_%d : Alloc of WRB_HANDLE Failed " 4778 "for the CID : %d\n", 4779 beiscsi_conn-> 4780 beiscsi_conn_cid); 4781 goto free_mgmt_hndls; 4782 } 4783 beiscsi_conn->plogin_wrb_handle = 4784 io_task->pwrb_handle; 4785 4786 } else { 4787 io_task->psgl_handle = 4788 beiscsi_conn->plogin_sgl_handle; 4789 io_task->pwrb_handle = 4790 beiscsi_conn->plogin_wrb_handle; 4791 } 4792 } else { 4793 spin_lock(&phba->mgmt_sgl_lock); 4794 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4795 spin_unlock(&phba->mgmt_sgl_lock); 4796 if (!io_task->psgl_handle) { 4797 beiscsi_log(phba, KERN_ERR, 4798 BEISCSI_LOG_IO | 4799 BEISCSI_LOG_CONFIG, 4800 "BM_%d : Alloc of MGMT_SGL_ICD Failed " 4801 "for the CID : %d\n", 4802 beiscsi_conn-> 4803 beiscsi_conn_cid); 4804 goto free_hndls; 4805 } 4806 io_task->pwrb_handle = 4807 alloc_wrb_handle(phba, 4808 beiscsi_conn->beiscsi_conn_cid); 4809 if (!io_task->pwrb_handle) { 4810 beiscsi_log(phba, KERN_ERR, 4811 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4812 "BM_%d : Alloc of WRB_HANDLE Failed " 4813 "for the CID : %d\n", 4814 beiscsi_conn->beiscsi_conn_cid); 4815 goto free_mgmt_hndls; 4816 } 4817 4818 } 4819 } 4820 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4821 wrb_index << 16) | (unsigned int) 4822 (io_task->psgl_handle->sgl_index)); 4823 io_task->pwrb_handle->pio_handle = task; 4824 4825 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4826 return 0; 4827 4828 free_io_hndls: 4829 spin_lock(&phba->io_sgl_lock); 4830 free_io_sgl_handle(phba, io_task->psgl_handle); 4831 spin_unlock(&phba->io_sgl_lock); 4832 goto free_hndls; 4833 free_mgmt_hndls: 4834 spin_lock(&phba->mgmt_sgl_lock); 4835 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4836 io_task->psgl_handle = NULL; 4837 spin_unlock(&phba->mgmt_sgl_lock); 4838 free_hndls: 4839 phwi_ctrlr = phba->phwi_ctrlr; 4840 cri_index = BE_GET_CRI_FROM_CID( 4841 beiscsi_conn->beiscsi_conn_cid); 4842 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4843 if (io_task->pwrb_handle) 4844 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4845 io_task->pwrb_handle = NULL; 4846 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4847 io_task->bhs_pa.u.a64.address); 4848 io_task->cmd_bhs = NULL; 4849 return -ENOMEM; 4850 } 4851 int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4852 unsigned int num_sg, unsigned int xferlen, 4853 unsigned int writedir) 4854 { 4855 4856 struct beiscsi_io_task *io_task = task->dd_data; 4857 struct iscsi_conn *conn = task->conn; 4858 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4859 struct beiscsi_hba *phba = beiscsi_conn->phba; 4860 struct iscsi_wrb *pwrb = NULL; 4861 unsigned int doorbell = 0; 4862 4863 pwrb = io_task->pwrb_handle->pwrb; 4864 4865 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4866 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4867 4868 if (writedir) { 4869 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4870 INI_WR_CMD); 4871 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4872 } else { 4873 
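/* Read path: command type INI_RD_CMD with the dsp bit cleared. */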
AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4874 INI_RD_CMD); 4875 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4876 } 4877 4878 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4879 type, pwrb); 4880 4881 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4882 cpu_to_be16(*(unsigned short *) 4883 &io_task->cmd_bhs->iscsi_hdr.lun)); 4884 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4885 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4886 io_task->pwrb_handle->wrb_index); 4887 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4888 be32_to_cpu(task->cmdsn)); 4889 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4890 io_task->psgl_handle->sgl_index); 4891 4892 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4893 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4894 io_task->pwrb_handle->nxt_wrb_index); 4895 4896 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4897 4898 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4899 doorbell |= (io_task->pwrb_handle->wrb_index & 4900 DB_DEF_PDU_WRB_INDEX_MASK) << 4901 DB_DEF_PDU_WRB_INDEX_SHIFT; 4902 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4903 iowrite32(doorbell, phba->db_va + 4904 beiscsi_conn->doorbell_offset); 4905 return 0; 4906 } 4907 4908 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4909 unsigned int num_sg, unsigned int xferlen, 4910 unsigned int writedir) 4911 { 4912 4913 struct beiscsi_io_task *io_task = task->dd_data; 4914 struct iscsi_conn *conn = task->conn; 4915 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4916 struct beiscsi_hba *phba = beiscsi_conn->phba; 4917 struct iscsi_wrb *pwrb = NULL; 4918 unsigned int doorbell = 0; 4919 4920 pwrb = io_task->pwrb_handle->pwrb; 4921 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4922 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4923 4924 if (writedir) { 4925 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4926 INI_WR_CMD); 4927 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4928 } else { 4929 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4930 INI_RD_CMD); 4931 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4932 } 4933 4934 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4935 type, pwrb); 4936 4937 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4938 cpu_to_be16(*(unsigned short *) 4939 &io_task->cmd_bhs->iscsi_hdr.lun)); 4940 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4941 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4942 io_task->pwrb_handle->wrb_index); 4943 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4944 be32_to_cpu(task->cmdsn)); 4945 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4946 io_task->psgl_handle->sgl_index); 4947 4948 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4949 4950 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4951 io_task->pwrb_handle->nxt_wrb_index); 4952 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4953 4954 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4955 doorbell |= (io_task->pwrb_handle->wrb_index & 4956 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4957 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4958 4959 iowrite32(doorbell, phba->db_va + 4960 beiscsi_conn->doorbell_offset); 4961 return 0; 4962 } 4963 4964 static int beiscsi_mtask(struct iscsi_task *task) 4965 { 4966 struct beiscsi_io_task *io_task = task->dd_data; 4967 struct iscsi_conn *conn = task->conn; 4968 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 
4969 struct beiscsi_hba *phba = beiscsi_conn->phba; 4970 struct iscsi_wrb *pwrb = NULL; 4971 unsigned int doorbell = 0; 4972 unsigned int cid; 4973 unsigned int pwrb_typeoffset = 0; 4974 4975 cid = beiscsi_conn->beiscsi_conn_cid; 4976 pwrb = io_task->pwrb_handle->pwrb; 4977 memset(pwrb, 0, sizeof(*pwrb)); 4978 4979 if (is_chip_be2_be3r(phba)) { 4980 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4981 be32_to_cpu(task->cmdsn)); 4982 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4983 io_task->pwrb_handle->wrb_index); 4984 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4985 io_task->psgl_handle->sgl_index); 4986 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 4987 task->data_count); 4988 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4989 io_task->pwrb_handle->nxt_wrb_index); 4990 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 4991 } else { 4992 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4993 be32_to_cpu(task->cmdsn)); 4994 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4995 io_task->pwrb_handle->wrb_index); 4996 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4997 io_task->psgl_handle->sgl_index); 4998 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 4999 task->data_count); 5000 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 5001 io_task->pwrb_handle->nxt_wrb_index); 5002 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 5003 } 5004 5005 5006 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 5007 case ISCSI_OP_LOGIN: 5008 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 5009 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 5010 hwi_write_buffer(pwrb, task); 5011 break; 5012 case ISCSI_OP_NOOP_OUT: 5013 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 5014 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 5015 if (is_chip_be2_be3r(phba)) 5016 AMAP_SET_BITS(struct amap_iscsi_wrb, 5017 dmsg, pwrb, 1); 5018 else 5019 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 5020 dmsg, pwrb, 1); 5021 } else { 5022 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 5023 if (is_chip_be2_be3r(phba)) 5024 AMAP_SET_BITS(struct amap_iscsi_wrb, 5025 dmsg, pwrb, 0); 5026 else 5027 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 5028 dmsg, pwrb, 0); 5029 } 5030 hwi_write_buffer(pwrb, task); 5031 break; 5032 case ISCSI_OP_TEXT: 5033 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 5034 hwi_write_buffer(pwrb, task); 5035 break; 5036 case ISCSI_OP_SCSI_TMFUNC: 5037 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 5038 hwi_write_buffer(pwrb, task); 5039 break; 5040 case ISCSI_OP_LOGOUT: 5041 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 5042 hwi_write_buffer(pwrb, task); 5043 break; 5044 5045 default: 5046 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5047 "BM_%d : opcode =%d Not supported\n", 5048 task->hdr->opcode & ISCSI_OPCODE_MASK); 5049 5050 return -EINVAL; 5051 } 5052 5053 /* Set the task type */ 5054 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 
5055 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 5056 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 5057 5058 doorbell |= cid & DB_WRB_POST_CID_MASK; 5059 doorbell |= (io_task->pwrb_handle->wrb_index & 5060 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 5061 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 5062 iowrite32(doorbell, phba->db_va + 5063 beiscsi_conn->doorbell_offset); 5064 return 0; 5065 } 5066 5067 static int beiscsi_task_xmit(struct iscsi_task *task) 5068 { 5069 struct beiscsi_io_task *io_task = task->dd_data; 5070 struct scsi_cmnd *sc = task->sc; 5071 struct beiscsi_hba *phba = NULL; 5072 struct scatterlist *sg; 5073 int num_sg; 5074 unsigned int writedir = 0, xferlen = 0; 5075 5076 phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba; 5077 5078 if (!sc) 5079 return beiscsi_mtask(task); 5080 5081 io_task->scsi_cmnd = sc; 5082 num_sg = scsi_dma_map(sc); 5083 if (num_sg < 0) { 5084 struct iscsi_conn *conn = task->conn; 5085 struct beiscsi_hba *phba = NULL; 5086 5087 phba = ((struct beiscsi_conn *)conn->dd_data)->phba; 5088 beiscsi_log(phba, KERN_ERR, 5089 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 5090 "BM_%d : scsi_dma_map Failed " 5091 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 5092 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 5093 io_task->libiscsi_itt, scsi_bufflen(sc)); 5094 5095 return num_sg; 5096 } 5097 xferlen = scsi_bufflen(sc); 5098 sg = scsi_sglist(sc); 5099 if (sc->sc_data_direction == DMA_TO_DEVICE) 5100 writedir = 1; 5101 else 5102 writedir = 0; 5103 5104 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 5105 } 5106 5107 /** 5108 * beiscsi_bsg_request - handle bsg request from ISCSI transport 5109 * @job: job to handle 5110 */ 5111 static int beiscsi_bsg_request(struct bsg_job *job) 5112 { 5113 struct Scsi_Host *shost; 5114 struct beiscsi_hba *phba; 5115 struct iscsi_bsg_request *bsg_req = job->request; 5116 int rc = -EINVAL; 5117 unsigned int tag; 5118 struct be_dma_mem nonemb_cmd; 5119 struct be_cmd_resp_hdr *resp; 5120 struct iscsi_bsg_reply *bsg_reply = job->reply; 5121 unsigned short status, extd_status; 5122 5123 shost = iscsi_job_to_shost(job); 5124 phba = iscsi_host_priv(shost); 5125 5126 switch (bsg_req->msgcode) { 5127 case ISCSI_BSG_HST_VENDOR: 5128 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 5129 job->request_payload.payload_len, 5130 &nonemb_cmd.dma); 5131 if (nonemb_cmd.va == NULL) { 5132 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5133 "BM_%d : Failed to allocate memory for " 5134 "beiscsi_bsg_request\n"); 5135 return -ENOMEM; 5136 } 5137 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 5138 &nonemb_cmd); 5139 if (!tag) { 5140 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5141 "BM_%d : MBX Tag Allocation Failed\n"); 5142 5143 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 5144 nonemb_cmd.va, nonemb_cmd.dma); 5145 return -EAGAIN; 5146 } 5147 5148 rc = wait_event_interruptible_timeout( 5149 phba->ctrl.mcc_wait[tag], 5150 phba->ctrl.mcc_numtag[tag], 5151 msecs_to_jiffies( 5152 BEISCSI_HOST_MBX_TIMEOUT)); 5153 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; 5154 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 5155 free_mcc_tag(&phba->ctrl, tag); 5156 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 5157 sg_copy_from_buffer(job->reply_payload.sg_list, 5158 job->reply_payload.sg_cnt, 5159 nonemb_cmd.va, (resp->response_length 5160 + sizeof(*resp))); 5161 bsg_reply->reply_payload_rcv_len = resp->response_length; 5162 bsg_reply->result = status; 5163 bsg_job_done(job, 
bsg_reply->result, 5164 bsg_reply->reply_payload_rcv_len); 5165 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 5166 nonemb_cmd.va, nonemb_cmd.dma); 5167 if (status || extd_status) { 5168 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5169 "BM_%d : MBX Cmd Failed" 5170 " status = %d extd_status = %d\n", 5171 status, extd_status); 5172 5173 return -EIO; 5174 } else { 5175 rc = 0; 5176 } 5177 break; 5178 5179 default: 5180 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5181 "BM_%d : Unsupported bsg command: 0x%x\n", 5182 bsg_req->msgcode); 5183 break; 5184 } 5185 5186 return rc; 5187 } 5188 5189 void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 5190 { 5191 /* Set the logging parameter */ 5192 beiscsi_log_enable_init(phba, beiscsi_log_enable); 5193 } 5194 5195 /* 5196 * beiscsi_quiesce()- Cleanup Driver resources 5197 * @phba: Instance Priv structure 5198 * @unload_state:i Clean or EEH unload state 5199 * 5200 * Free the OS and HW resources held by the driver 5201 **/ 5202 static void beiscsi_quiesce(struct beiscsi_hba *phba, 5203 uint32_t unload_state) 5204 { 5205 struct hwi_controller *phwi_ctrlr; 5206 struct hwi_context_memory *phwi_context; 5207 struct be_eq_obj *pbe_eq; 5208 unsigned int i, msix_vec; 5209 5210 phwi_ctrlr = phba->phwi_ctrlr; 5211 phwi_context = phwi_ctrlr->phwi_ctxt; 5212 hwi_disable_intr(phba); 5213 if (phba->msix_enabled) { 5214 for (i = 0; i <= phba->num_cpus; i++) { 5215 msix_vec = phba->msix_entries[i].vector; 5216 synchronize_irq(msix_vec); 5217 free_irq(msix_vec, &phwi_context->be_eq[i]); 5218 kfree(phba->msi_name[i]); 5219 } 5220 } else 5221 if (phba->pcidev->irq) { 5222 synchronize_irq(phba->pcidev->irq); 5223 free_irq(phba->pcidev->irq, phba); 5224 } 5225 pci_disable_msix(phba->pcidev); 5226 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); 5227 5228 for (i = 0; i < phba->num_cpus; i++) { 5229 pbe_eq = &phwi_context->be_eq[i]; 5230 blk_iopoll_disable(&pbe_eq->iopoll); 5231 } 5232 5233 if (unload_state == BEISCSI_CLEAN_UNLOAD) { 5234 destroy_workqueue(phba->wq); 5235 beiscsi_clean_port(phba); 5236 beiscsi_free_mem(phba); 5237 5238 beiscsi_unmap_pci_function(phba); 5239 pci_free_consistent(phba->pcidev, 5240 phba->ctrl.mbox_mem_alloced.size, 5241 phba->ctrl.mbox_mem_alloced.va, 5242 phba->ctrl.mbox_mem_alloced.dma); 5243 } else { 5244 hwi_purge_eq(phba); 5245 hwi_cleanup(phba); 5246 } 5247 5248 } 5249 5250 static void beiscsi_remove(struct pci_dev *pcidev) 5251 { 5252 5253 struct beiscsi_hba *phba = NULL; 5254 5255 phba = pci_get_drvdata(pcidev); 5256 if (!phba) { 5257 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); 5258 return; 5259 } 5260 5261 beiscsi_destroy_def_ifaces(phba); 5262 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); 5263 iscsi_boot_destroy_kset(phba->boot_kset); 5264 iscsi_host_remove(phba->shost); 5265 pci_dev_put(phba->pcidev); 5266 iscsi_host_free(phba->shost); 5267 pci_disable_pcie_error_reporting(pcidev); 5268 pci_set_drvdata(pcidev, NULL); 5269 pci_disable_device(pcidev); 5270 } 5271 5272 static void beiscsi_shutdown(struct pci_dev *pcidev) 5273 { 5274 5275 struct beiscsi_hba *phba = NULL; 5276 5277 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); 5278 if (!phba) { 5279 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n"); 5280 return; 5281 } 5282 5283 phba->state = BE_ADAPTER_STATE_SHUTDOWN; 5284 iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); 5285 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); 5286 pci_disable_device(pcidev); 5287 } 5288 5289 static void beiscsi_msix_enable(struct 
beiscsi_hba *phba) 5290 { 5291 int i, status; 5292 5293 for (i = 0; i <= phba->num_cpus; i++) 5294 phba->msix_entries[i].entry = i; 5295 5296 status = pci_enable_msix_range(phba->pcidev, phba->msix_entries, 5297 phba->num_cpus + 1, phba->num_cpus + 1); 5298 if (status > 0) 5299 phba->msix_enabled = true; 5300 5301 return; 5302 } 5303 5304 static void be_eqd_update(struct beiscsi_hba *phba) 5305 { 5306 struct be_set_eqd set_eqd[MAX_CPUS]; 5307 struct be_aic_obj *aic; 5308 struct be_eq_obj *pbe_eq; 5309 struct hwi_controller *phwi_ctrlr; 5310 struct hwi_context_memory *phwi_context; 5311 int eqd, i, num = 0; 5312 ulong now; 5313 u32 pps, delta; 5314 unsigned int tag; 5315 5316 phwi_ctrlr = phba->phwi_ctrlr; 5317 phwi_context = phwi_ctrlr->phwi_ctxt; 5318 5319 for (i = 0; i <= phba->num_cpus; i++) { 5320 aic = &phba->aic_obj[i]; 5321 pbe_eq = &phwi_context->be_eq[i]; 5322 now = jiffies; 5323 if (!aic->jiffs || time_before(now, aic->jiffs) || 5324 pbe_eq->cq_count < aic->eq_prev) { 5325 aic->jiffs = now; 5326 aic->eq_prev = pbe_eq->cq_count; 5327 continue; 5328 } 5329 delta = jiffies_to_msecs(now - aic->jiffs); 5330 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5331 eqd = (pps / 1500) << 2; 5332 5333 if (eqd < 8) 5334 eqd = 0; 5335 eqd = min_t(u32, eqd, phwi_context->max_eqd); 5336 eqd = max_t(u32, eqd, phwi_context->min_eqd); 5337 5338 aic->jiffs = now; 5339 aic->eq_prev = pbe_eq->cq_count; 5340 5341 if (eqd != aic->prev_eqd) { 5342 set_eqd[num].delay_multiplier = (eqd * 65)/100; 5343 set_eqd[num].eq_id = pbe_eq->q.id; 5344 aic->prev_eqd = eqd; 5345 num++; 5346 } 5347 } 5348 if (num) { 5349 tag = be_cmd_modify_eq_delay(phba, set_eqd, num); 5350 if (tag) 5351 beiscsi_mccq_compl(phba, tag, NULL, NULL); 5352 } 5353 } 5354 5355 static void be_check_boot_session(struct beiscsi_hba *phba) 5356 { 5357 if (beiscsi_setup_boot_info(phba)) 5358 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5359 "BM_%d : Could not set up " 5360 "iSCSI boot info on async event.\n"); 5361 } 5362 5363 /* 5364 * beiscsi_hw_health_check()- Check adapter health 5365 * @work: work item to check HW health 5366 * 5367 * Check if adapter in an unrecoverable state or not. 5368 **/ 5369 static void 5370 beiscsi_hw_health_check(struct work_struct *work) 5371 { 5372 struct beiscsi_hba *phba = 5373 container_of(work, struct beiscsi_hba, 5374 beiscsi_hw_check_task.work); 5375 5376 be_eqd_update(phba); 5377 5378 if (phba->state & BE_ADAPTER_CHECK_BOOT) { 5379 phba->state &= ~BE_ADAPTER_CHECK_BOOT; 5380 be_check_boot_session(phba); 5381 } 5382 5383 beiscsi_ue_detect(phba); 5384 5385 schedule_delayed_work(&phba->beiscsi_hw_check_task, 5386 msecs_to_jiffies(1000)); 5387 } 5388 5389 5390 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 5391 pci_channel_state_t state) 5392 { 5393 struct beiscsi_hba *phba = NULL; 5394 5395 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5396 phba->state |= BE_ADAPTER_PCI_ERR; 5397 5398 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5399 "BM_%d : EEH error detected\n"); 5400 5401 beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD); 5402 5403 if (state == pci_channel_io_perm_failure) { 5404 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5405 "BM_%d : EEH : State PERM Failure"); 5406 return PCI_ERS_RESULT_DISCONNECT; 5407 } 5408 5409 pci_disable_device(pdev); 5410 5411 /* The error could cause the FW to trigger a flash debug dump. 5412 * Resetting the card while flash dump is in progress 5413 * can cause it not to recover; wait for it to finish. 
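* (The fixed 30 second sleep below is presumably sized to outlast such a dump.)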
5414 * Wait only for first function as it is needed only once per 5415 * adapter. 5416 **/ 5417 if (pdev->devfn == 0) 5418 ssleep(30); 5419 5420 return PCI_ERS_RESULT_NEED_RESET; 5421 } 5422 5423 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) 5424 { 5425 struct beiscsi_hba *phba = NULL; 5426 int status = 0; 5427 5428 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5429 5430 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5431 "BM_%d : EEH Reset\n"); 5432 5433 status = pci_enable_device(pdev); 5434 if (status) 5435 return PCI_ERS_RESULT_DISCONNECT; 5436 5437 pci_set_master(pdev); 5438 pci_set_power_state(pdev, PCI_D0); 5439 pci_restore_state(pdev); 5440 5441 /* Wait for the CHIP Reset to complete */ 5442 status = be_chk_reset_complete(phba); 5443 if (!status) { 5444 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5445 "BM_%d : EEH Reset Completed\n"); 5446 } else { 5447 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5448 "BM_%d : EEH Reset Completion Failure\n"); 5449 return PCI_ERS_RESULT_DISCONNECT; 5450 } 5451 5452 pci_cleanup_aer_uncorrect_error_status(pdev); 5453 return PCI_ERS_RESULT_RECOVERED; 5454 } 5455 5456 static void beiscsi_eeh_resume(struct pci_dev *pdev) 5457 { 5458 int ret = 0, i; 5459 struct be_eq_obj *pbe_eq; 5460 struct beiscsi_hba *phba = NULL; 5461 struct hwi_controller *phwi_ctrlr; 5462 struct hwi_context_memory *phwi_context; 5463 5464 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5465 pci_save_state(pdev); 5466 5467 if (enable_msix) 5468 find_num_cpus(phba); 5469 else 5470 phba->num_cpus = 1; 5471 5472 if (enable_msix) { 5473 beiscsi_msix_enable(phba); 5474 if (!phba->msix_enabled) 5475 phba->num_cpus = 1; 5476 } 5477 5478 ret = beiscsi_cmd_reset_function(phba); 5479 if (ret) { 5480 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5481 "BM_%d : Reset Failed\n"); 5482 goto ret_err; 5483 } 5484 5485 ret = be_chk_reset_complete(phba); 5486 if (ret) { 5487 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5488 "BM_%d : Failed to get out of reset.\n"); 5489 goto ret_err; 5490 } 5491 5492 beiscsi_get_params(phba); 5493 phba->shost->max_id = phba->params.cxns_per_ctrl; 5494 phba->shost->can_queue = phba->params.ios_per_ctrl; 5495 ret = hwi_init_controller(phba); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : beiscsi_eeh_resume - Failed in hwi_init_controller\n"); goto ret_err; } 5496 5497 for (i = 0; i < MAX_MCC_CMD; i++) { 5498 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5499 phba->ctrl.mcc_tag[i] = i + 1; 5500 phba->ctrl.mcc_numtag[i + 1] = 0; 5501 phba->ctrl.mcc_tag_available++; 5502 } 5503 5504 phwi_ctrlr = phba->phwi_ctrlr; 5505 phwi_context = phwi_ctrlr->phwi_ctxt; 5506 5507 for (i = 0; i < phba->num_cpus; i++) { 5508 pbe_eq = &phwi_context->be_eq[i]; 5509 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, 5510 be_iopoll); 5511 blk_iopoll_enable(&pbe_eq->iopoll); 5512 } 5513 5514 i = (phba->msix_enabled) ?
i : 0; 5515 /* Work item for MCC handling */ 5516 pbe_eq = &phwi_context->be_eq[i]; 5517 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); 5518 5519 ret = beiscsi_init_irqs(phba); 5520 if (ret < 0) { 5521 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5522 "BM_%d : beiscsi_eeh_resume - " 5523 "Failed to beiscsi_init_irqs\n"); 5524 goto ret_err; 5525 } 5526 5527 hwi_enable_intr(phba); 5528 phba->state &= ~BE_ADAPTER_PCI_ERR; 5529 5530 return; 5531 ret_err: 5532 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5533 "BM_%d : AER EEH Resume Failed\n"); 5534 } 5535 5536 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5537 const struct pci_device_id *id) 5538 { 5539 struct beiscsi_hba *phba = NULL; 5540 struct hwi_controller *phwi_ctrlr; 5541 struct hwi_context_memory *phwi_context; 5542 struct be_eq_obj *pbe_eq; 5543 int ret = 0, i; 5544 5545 ret = beiscsi_enable_pci(pcidev); 5546 if (ret < 0) { 5547 dev_err(&pcidev->dev, 5548 "beiscsi_dev_probe - Failed to enable pci device\n"); 5549 return ret; 5550 } 5551 5552 phba = beiscsi_hba_alloc(pcidev); 5553 if (!phba) { 5554 dev_err(&pcidev->dev, 5555 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5556 goto disable_pci; 5557 } 5558 5559 /* Enable EEH reporting */ 5560 ret = pci_enable_pcie_error_reporting(pcidev); 5561 if (ret) 5562 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5563 "BM_%d : PCIe Error Reporting " 5564 "Enabling Failed\n"); 5565 5566 pci_save_state(pcidev); 5567 5568 /* Initialize Driver configuration Paramters */ 5569 beiscsi_hba_attrs_init(phba); 5570 5571 phba->fw_timeout = false; 5572 phba->mac_addr_set = false; 5573 5574 5575 switch (pcidev->device) { 5576 case BE_DEVICE_ID1: 5577 case OC_DEVICE_ID1: 5578 case OC_DEVICE_ID2: 5579 phba->generation = BE_GEN2; 5580 phba->iotask_fn = beiscsi_iotask; 5581 break; 5582 case BE_DEVICE_ID2: 5583 case OC_DEVICE_ID3: 5584 phba->generation = BE_GEN3; 5585 phba->iotask_fn = beiscsi_iotask; 5586 break; 5587 case OC_SKH_ID1: 5588 phba->generation = BE_GEN4; 5589 phba->iotask_fn = beiscsi_iotask_v2; 5590 break; 5591 default: 5592 phba->generation = 0; 5593 } 5594 5595 ret = be_ctrl_init(phba, pcidev); 5596 if (ret) { 5597 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5598 "BM_%d : beiscsi_dev_probe-" 5599 "Failed in be_ctrl_init\n"); 5600 goto hba_free; 5601 } 5602 5603 ret = beiscsi_cmd_reset_function(phba); 5604 if (ret) { 5605 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5606 "BM_%d : Reset Failed\n"); 5607 goto hba_free; 5608 } 5609 ret = be_chk_reset_complete(phba); 5610 if (ret) { 5611 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5612 "BM_%d : Failed to get out of reset.\n"); 5613 goto hba_free; 5614 } 5615 5616 spin_lock_init(&phba->io_sgl_lock); 5617 spin_lock_init(&phba->mgmt_sgl_lock); 5618 spin_lock_init(&phba->isr_lock); 5619 spin_lock_init(&phba->async_pdu_lock); 5620 ret = mgmt_get_fw_config(&phba->ctrl, phba); 5621 if (ret != 0) { 5622 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5623 "BM_%d : Error getting fw config\n"); 5624 goto free_port; 5625 } 5626 5627 if (enable_msix) 5628 find_num_cpus(phba); 5629 else 5630 phba->num_cpus = 1; 5631 5632 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5633 "BM_%d : num_cpus = %d\n", 5634 phba->num_cpus); 5635 5636 if (enable_msix) { 5637 beiscsi_msix_enable(phba); 5638 if (!phba->msix_enabled) 5639 phba->num_cpus = 1; 5640 } 5641 5642 phba->shost->max_id = phba->params.cxns_per_ctrl; 5643 beiscsi_get_params(phba); 5644 phba->shost->can_queue = phba->params.ios_per_ctrl; 5645 ret = beiscsi_init_port(phba); 5646 if (ret < 
	for (i = 0; i < MAX_MCC_CMD; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_numtag[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
		       sizeof(struct be_dma_mem));
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
	if (!phba->wq) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to allocate work queue\n");
		/* fail the probe instead of returning 0 */
		ret = -ENOMEM;
		goto free_twq;
	}

	INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
			  beiscsi_hw_health_check);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
				be_iopoll);
		blk_iopoll_enable(&pbe_eq->iopoll);
	}

	i = (phba->msix_enabled) ? i : 0;
	/* Work item for MCC handling */
	pbe_eq = &phwi_context->be_eq[i];
	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);

	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : beiscsi_dev_probe-"
			    "Failed to beiscsi_init_irqs\n");
		goto free_blkenbld;
	}
	hwi_enable_intr(phba);

	/* propagate the error instead of returning 0 on failure */
	ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
	if (ret)
		goto free_blkenbld;

	if (beiscsi_setup_boot_info(phba))
		/*
		 * log error but continue, because we may not be using
		 * iscsi boot.
		 */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BM_%d : Could not set up "
			    "iSCSI boot info.\n");
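
	/*
	 * Illustrative sketch (not part of this function): the delayed
	 * health-check work armed just below typically re-arms itself from
	 * its own handler. example_hw_health_check is a hypothetical name;
	 * the real handler is beiscsi_hw_health_check, wired up via
	 * INIT_DELAYED_WORK() above:
	 *
	 *	static void example_hw_health_check(struct work_struct *work)
	 *	{
	 *		struct beiscsi_hba *phba =
	 *			container_of(work, struct beiscsi_hba,
	 *				     beiscsi_hw_check_task.work);
	 *
	 *		... poll adapter health, then re-arm ...
	 *		schedule_delayed_work(&phba->beiscsi_hw_check_task,
	 *				      msecs_to_jiffies(1000));
	 *	}
	 */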
	beiscsi_create_def_ifaces(phba);
	schedule_delayed_work(&phba->beiscsi_hw_check_task,
			      msecs_to_jiffies(1000));

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_blkenbld:
	destroy_workqueue(phba->wq);
	for (i = 0; i < phba->num_cpus; i++) {
		pbe_eq = &phwi_context->be_eq[i];
		blk_iopoll_disable(&pbe_eq->iopoll);
	}
free_twq:
	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
free_port:
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
hba_free:
	if (phba->msix_enabled)
		pci_disable_msix(phba->pcidev);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
disable_pci:
	pci_disable_device(pcidev);
	return ret;
}

static struct pci_error_handlers beiscsi_eeh_handlers = {
	.error_detected = beiscsi_eeh_err_detected,
	.slot_reset = beiscsi_eeh_reset,
	.resume = beiscsi_eeh_resume,
};

struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = be2iscsi_attr_is_visible,
	.set_iface_param = be2iscsi_iface_set_param,
	.get_iface_param = be2iscsi_iface_get_param,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.bsg_request = beiscsi_bsg_request,
};

static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.shutdown = beiscsi_shutdown,
	.id_table = beiscsi_pci_id_table,
	.err_handler = &beiscsi_eeh_handlers,
};
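
/*
 * Load/unload ordering: the iSCSI transport is registered before the PCI
 * driver because beiscsi_dev_probe() can run (and call iscsi_host_add())
 * as soon as pci_register_driver() returns, and it needs the transport
 * handle beiscsi_scsi_transport to already be valid. Unload tears the two
 * down in the reverse order.
 */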
static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
		iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi transport.\n");
		return -ENOMEM;
	}
	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
	       &beiscsi_iscsi_transport);

	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		printk(KERN_ERR
		       "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);