1 /** 2 * Copyright (C) 2005 - 2015 Avago Technologies 3 * All rights reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License version 2 7 * as published by the Free Software Foundation. The full GNU General 8 * Public License is included in this distribution in the file called COPYING. 9 * 10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com) 11 * 12 * Contact Information: 13 * linux-drivers@avagotech.com 14 * 15 * Avago Technologies 16 * 3333 Susan Street 17 * Costa Mesa, CA 92626 18 */ 19 20 #include <linux/reboot.h> 21 #include <linux/delay.h> 22 #include <linux/slab.h> 23 #include <linux/interrupt.h> 24 #include <linux/blkdev.h> 25 #include <linux/pci.h> 26 #include <linux/string.h> 27 #include <linux/kernel.h> 28 #include <linux/semaphore.h> 29 #include <linux/iscsi_boot_sysfs.h> 30 #include <linux/module.h> 31 #include <linux/bsg-lib.h> 32 33 #include <scsi/libiscsi.h> 34 #include <scsi/scsi_bsg_iscsi.h> 35 #include <scsi/scsi_netlink.h> 36 #include <scsi/scsi_transport_iscsi.h> 37 #include <scsi/scsi_transport.h> 38 #include <scsi/scsi_cmnd.h> 39 #include <scsi/scsi_device.h> 40 #include <scsi/scsi_host.h> 41 #include <scsi/scsi.h> 42 #include "be_main.h" 43 #include "be_iscsi.h" 44 #include "be_mgmt.h" 45 #include "be_cmds.h" 46 47 static unsigned int be_iopoll_budget = 10; 48 static unsigned int be_max_phys_size = 64; 49 static unsigned int enable_msix = 1; 50 51 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR); 52 MODULE_VERSION(BUILD_STR); 53 MODULE_AUTHOR("Avago Technologies"); 54 MODULE_LICENSE("GPL"); 55 module_param(be_iopoll_budget, int, 0); 56 module_param(enable_msix, int, 0); 57 module_param(be_max_phys_size, uint, S_IRUGO); 58 MODULE_PARM_DESC(be_max_phys_size, 59 "Maximum Size (In Kilobytes) of physically contiguous " 60 "memory that can be allocated. Range is 16 - 128"); 61 62 #define beiscsi_disp_param(_name)\ 63 ssize_t \ 64 beiscsi_##_name##_disp(struct device *dev,\ 65 struct device_attribute *attrib, char *buf) \ 66 { \ 67 struct Scsi_Host *shost = class_to_shost(dev);\ 68 struct beiscsi_hba *phba = iscsi_host_priv(shost); \ 69 uint32_t param_val = 0; \ 70 param_val = phba->attr_##_name;\ 71 return snprintf(buf, PAGE_SIZE, "%d\n",\ 72 phba->attr_##_name);\ 73 } 74 75 #define beiscsi_change_param(_name, _minval, _maxval, _defaval)\ 76 int \ 77 beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\ 78 {\ 79 if (val >= _minval && val <= _maxval) {\ 80 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 81 "BA_%d : beiscsi_"#_name" updated "\ 82 "from 0x%x ==> 0x%x\n",\ 83 phba->attr_##_name, val); \ 84 phba->attr_##_name = val;\ 85 return 0;\ 86 } \ 87 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \ 88 "BA_%d beiscsi_"#_name" attribute "\ 89 "cannot be updated to 0x%x, "\ 90 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 91 return -EINVAL;\ 92 } 93 94 #define beiscsi_store_param(_name) \ 95 ssize_t \ 96 beiscsi_##_name##_store(struct device *dev,\ 97 struct device_attribute *attr, const char *buf,\ 98 size_t count) \ 99 { \ 100 struct Scsi_Host *shost = class_to_shost(dev);\ 101 struct beiscsi_hba *phba = iscsi_host_priv(shost);\ 102 uint32_t param_val = 0;\ 103 if (!isdigit(buf[0]))\ 104 return -EINVAL;\ 105 if (sscanf(buf, "%i", ¶m_val) != 1)\ 106 return -EINVAL;\ 107 if (beiscsi_##_name##_change(phba, param_val) == 0) \ 108 return strlen(buf);\ 109 else \ 110 return -EINVAL;\ 111 } 112 113 #define beiscsi_init_param(_name, _minval, _maxval, _defval) \ 114 int \ 115 beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \ 116 { \ 117 if (val >= _minval && val <= _maxval) {\ 118 phba->attr_##_name = val;\ 119 return 0;\ 120 } \ 121 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\ 122 "BA_%d beiscsi_"#_name" attribute " \ 123 "cannot be updated to 0x%x, "\ 124 "range allowed is ["#_minval" - "#_maxval"]\n", val);\ 125 phba->attr_##_name = _defval;\ 126 return -EINVAL;\ 127 } 128 129 #define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \ 130 static uint beiscsi_##_name = _defval;\ 131 module_param(beiscsi_##_name, uint, S_IRUGO);\ 132 MODULE_PARM_DESC(beiscsi_##_name, _descp);\ 133 beiscsi_disp_param(_name)\ 134 beiscsi_change_param(_name, _minval, _maxval, _defval)\ 135 beiscsi_store_param(_name)\ 136 beiscsi_init_param(_name, _minval, _maxval, _defval)\ 137 DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\ 138 beiscsi_##_name##_disp, beiscsi_##_name##_store) 139 140 /* 141 * When new log level added update the 142 * the MAX allowed value for log_enable 143 */ 144 BEISCSI_RW_ATTR(log_enable, 0x00, 145 0xFF, 0x00, "Enable logging Bit Mask\n" 146 "\t\t\t\tInitialization Events : 0x01\n" 147 "\t\t\t\tMailbox Events : 0x02\n" 148 "\t\t\t\tMiscellaneous Events : 0x04\n" 149 "\t\t\t\tError Handling : 0x08\n" 150 "\t\t\t\tIO Path Events : 0x10\n" 151 "\t\t\t\tConfiguration Path : 0x20\n" 152 "\t\t\t\tiSCSI Protocol : 0x40\n"); 153 154 DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); 155 DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); 156 DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); 157 DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL); 158 DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO, 159 beiscsi_active_session_disp, NULL); 160 DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO, 161 beiscsi_free_session_disp, NULL); 162 struct device_attribute *beiscsi_attrs[] = { 163 &dev_attr_beiscsi_log_enable, 164 &dev_attr_beiscsi_drvr_ver, 165 &dev_attr_beiscsi_adapter_family, 166 &dev_attr_beiscsi_fw_ver, 167 &dev_attr_beiscsi_active_session_count, 168 &dev_attr_beiscsi_free_session_count, 169 &dev_attr_beiscsi_phys_port, 170 NULL, 171 }; 172 173 static char const *cqe_desc[] = { 174 "RESERVED_DESC", 175 "SOL_CMD_COMPLETE", 176 "SOL_CMD_KILLED_DATA_DIGEST_ERR", 177 "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL", 178 "CXN_KILLED_BURST_LEN_MISMATCH", 179 "CXN_KILLED_AHS_RCVD", 180 "CXN_KILLED_HDR_DIGEST_ERR", 181 "CXN_KILLED_UNKNOWN_HDR", 182 "CXN_KILLED_STALE_ITT_TTT_RCVD", 183 "CXN_KILLED_INVALID_ITT_TTT_RCVD", 184 "CXN_KILLED_RST_RCVD", 185 "CXN_KILLED_TIMED_OUT", 186 "CXN_KILLED_RST_SENT", 187 "CXN_KILLED_FIN_RCVD", 188 "CXN_KILLED_BAD_UNSOL_PDU_RCVD", 189 "CXN_KILLED_BAD_WRB_INDEX_ERROR", 190 "CXN_KILLED_OVER_RUN_RESIDUAL", 191 "CXN_KILLED_UNDER_RUN_RESIDUAL", 192 "CMD_KILLED_INVALID_STATSN_RCVD", 193 "CMD_KILLED_INVALID_R2T_RCVD", 194 "CMD_CXN_KILLED_LUN_INVALID", 195 "CMD_CXN_KILLED_ICD_INVALID", 196 "CMD_CXN_KILLED_ITT_INVALID", 197 "CMD_CXN_KILLED_SEQ_OUTOFORDER", 198 "CMD_CXN_KILLED_INVALID_DATASN_RCVD", 199 "CXN_INVALIDATE_NOTIFY", 200 "CXN_INVALIDATE_INDEX_NOTIFY", 201 "CMD_INVALIDATED_NOTIFY", 202 "UNSOL_HDR_NOTIFY", 203 "UNSOL_DATA_NOTIFY", 204 "UNSOL_DATA_DIGEST_ERROR_NOTIFY", 205 "DRIVERMSG_NOTIFY", 206 "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN", 207 "SOL_CMD_KILLED_DIF_ERR", 208 "CXN_KILLED_SYN_RCVD", 209 "CXN_KILLED_IMM_DATA_RCVD" 210 }; 211 212 static int beiscsi_slave_configure(struct scsi_device *sdev) 213 { 214 blk_queue_max_segment_size(sdev->request_queue, 65536); 215 return 0; 216 } 217 218 static int beiscsi_eh_abort(struct scsi_cmnd *sc) 219 { 220 struct iscsi_cls_session *cls_session; 221 struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr; 222 struct beiscsi_io_task *aborted_io_task; 223 struct iscsi_conn *conn; 224 struct beiscsi_conn *beiscsi_conn; 225 struct beiscsi_hba *phba; 226 struct iscsi_session *session; 227 struct invalidate_command_table *inv_tbl; 228 struct be_dma_mem nonemb_cmd; 229 unsigned int cid, tag, num_invalidate; 230 int rc; 231 232 cls_session = starget_to_session(scsi_target(sc->device)); 233 session = cls_session->dd_data; 234 235 spin_lock_bh(&session->frwd_lock); 236 if (!aborted_task || !aborted_task->sc) { 237 /* we raced */ 238 spin_unlock_bh(&session->frwd_lock); 239 return SUCCESS; 240 } 241 242 aborted_io_task = aborted_task->dd_data; 243 if (!aborted_io_task->scsi_cmnd) { 244 /* raced or invalid command */ 245 spin_unlock_bh(&session->frwd_lock); 246 return SUCCESS; 247 } 248 spin_unlock_bh(&session->frwd_lock); 249 /* Invalidate WRB Posted for this Task */ 250 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 251 aborted_io_task->pwrb_handle->pwrb, 252 1); 253 254 conn = aborted_task->conn; 255 beiscsi_conn = conn->dd_data; 256 phba = beiscsi_conn->phba; 257 258 /* invalidate iocb */ 259 cid = beiscsi_conn->beiscsi_conn_cid; 260 inv_tbl = phba->inv_tbl; 261 memset(inv_tbl, 0x0, sizeof(*inv_tbl)); 262 inv_tbl->cid = cid; 263 inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index; 264 num_invalidate = 1; 265 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 266 sizeof(struct invalidate_commands_params_in), 267 &nonemb_cmd.dma); 268 if (nonemb_cmd.va == NULL) { 269 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 270 "BM_%d : Failed to allocate memory for" 271 "mgmt_invalidate_icds\n"); 272 return FAILED; 273 } 274 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 275 276 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 277 cid, &nonemb_cmd); 278 if (!tag) { 279 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 280 "BM_%d : mgmt_invalidate_icds could not be" 281 "submitted\n"); 282 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 283 nonemb_cmd.va, nonemb_cmd.dma); 284 285 return FAILED; 286 } 287 288 rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); 289 if (rc != -EBUSY) 290 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 291 nonemb_cmd.va, nonemb_cmd.dma); 292 293 return iscsi_eh_abort(sc); 294 } 295 296 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc) 297 { 298 struct iscsi_task *abrt_task; 299 struct beiscsi_io_task *abrt_io_task; 300 struct iscsi_conn *conn; 301 struct beiscsi_conn *beiscsi_conn; 302 struct beiscsi_hba *phba; 303 struct iscsi_session *session; 304 struct iscsi_cls_session *cls_session; 305 struct invalidate_command_table *inv_tbl; 306 struct be_dma_mem nonemb_cmd; 307 unsigned int cid, tag, i, num_invalidate; 308 int rc; 309 310 /* invalidate iocbs */ 311 cls_session = starget_to_session(scsi_target(sc->device)); 312 session = cls_session->dd_data; 313 spin_lock_bh(&session->frwd_lock); 314 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) { 315 spin_unlock_bh(&session->frwd_lock); 316 return FAILED; 317 } 318 conn = session->leadconn; 319 beiscsi_conn = conn->dd_data; 320 phba = beiscsi_conn->phba; 321 cid = beiscsi_conn->beiscsi_conn_cid; 322 inv_tbl = phba->inv_tbl; 323 memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN); 324 num_invalidate = 0; 325 for (i = 0; i < conn->session->cmds_max; i++) { 326 abrt_task = conn->session->cmds[i]; 327 abrt_io_task = abrt_task->dd_data; 328 if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE) 329 continue; 330 331 if (sc->device->lun != abrt_task->sc->device->lun) 332 continue; 333 334 /* Invalidate WRB Posted for this Task */ 335 AMAP_SET_BITS(struct amap_iscsi_wrb, invld, 336 abrt_io_task->pwrb_handle->pwrb, 337 1); 338 339 inv_tbl->cid = cid; 340 inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index; 341 num_invalidate++; 342 inv_tbl++; 343 } 344 spin_unlock_bh(&session->frwd_lock); 345 inv_tbl = phba->inv_tbl; 346 347 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 348 sizeof(struct invalidate_commands_params_in), 349 &nonemb_cmd.dma); 350 if (nonemb_cmd.va == NULL) { 351 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH, 352 "BM_%d : Failed to allocate memory for" 353 "mgmt_invalidate_icds\n"); 354 return FAILED; 355 } 356 nonemb_cmd.size = sizeof(struct invalidate_commands_params_in); 357 memset(nonemb_cmd.va, 0, nonemb_cmd.size); 358 tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, 359 cid, &nonemb_cmd); 360 if (!tag) { 361 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH, 362 "BM_%d : mgmt_invalidate_icds could not be" 363 " submitted\n"); 364 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 365 nonemb_cmd.va, nonemb_cmd.dma); 366 return FAILED; 367 } 368 369 rc = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); 370 if (rc != -EBUSY) 371 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 372 nonemb_cmd.va, nonemb_cmd.dma); 373 return iscsi_eh_device_reset(sc); 374 } 375 376 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf) 377 { 378 struct beiscsi_hba *phba = data; 379 struct mgmt_session_info *boot_sess = &phba->boot_sess; 380 struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0]; 381 char *str = buf; 382 int rc; 383 384 switch (type) { 385 case ISCSI_BOOT_TGT_NAME: 386 rc = sprintf(buf, "%.*s\n", 387 (int)strlen(boot_sess->target_name), 388 (char *)&boot_sess->target_name); 389 break; 390 case ISCSI_BOOT_TGT_IP_ADDR: 391 if (boot_conn->dest_ipaddr.ip_type == 0x1) 392 rc = sprintf(buf, "%pI4\n", 393 (char *)&boot_conn->dest_ipaddr.addr); 394 else 395 rc = sprintf(str, "%pI6\n", 396 (char *)&boot_conn->dest_ipaddr.addr); 397 break; 398 case ISCSI_BOOT_TGT_PORT: 399 rc = sprintf(str, "%d\n", boot_conn->dest_port); 400 break; 401 402 case ISCSI_BOOT_TGT_CHAP_NAME: 403 rc = sprintf(str, "%.*s\n", 404 boot_conn->negotiated_login_options.auth_data.chap. 405 target_chap_name_length, 406 (char *)&boot_conn->negotiated_login_options. 407 auth_data.chap.target_chap_name); 408 break; 409 case ISCSI_BOOT_TGT_CHAP_SECRET: 410 rc = sprintf(str, "%.*s\n", 411 boot_conn->negotiated_login_options.auth_data.chap. 412 target_secret_length, 413 (char *)&boot_conn->negotiated_login_options. 414 auth_data.chap.target_secret); 415 break; 416 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 417 rc = sprintf(str, "%.*s\n", 418 boot_conn->negotiated_login_options.auth_data.chap. 419 intr_chap_name_length, 420 (char *)&boot_conn->negotiated_login_options. 421 auth_data.chap.intr_chap_name); 422 break; 423 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 424 rc = sprintf(str, "%.*s\n", 425 boot_conn->negotiated_login_options.auth_data.chap. 426 intr_secret_length, 427 (char *)&boot_conn->negotiated_login_options. 428 auth_data.chap.intr_secret); 429 break; 430 case ISCSI_BOOT_TGT_FLAGS: 431 rc = sprintf(str, "2\n"); 432 break; 433 case ISCSI_BOOT_TGT_NIC_ASSOC: 434 rc = sprintf(str, "0\n"); 435 break; 436 default: 437 rc = -ENOSYS; 438 break; 439 } 440 return rc; 441 } 442 443 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf) 444 { 445 struct beiscsi_hba *phba = data; 446 char *str = buf; 447 int rc; 448 449 switch (type) { 450 case ISCSI_BOOT_INI_INITIATOR_NAME: 451 rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname); 452 break; 453 default: 454 rc = -ENOSYS; 455 break; 456 } 457 return rc; 458 } 459 460 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf) 461 { 462 struct beiscsi_hba *phba = data; 463 char *str = buf; 464 int rc; 465 466 switch (type) { 467 case ISCSI_BOOT_ETH_FLAGS: 468 rc = sprintf(str, "2\n"); 469 break; 470 case ISCSI_BOOT_ETH_INDEX: 471 rc = sprintf(str, "0\n"); 472 break; 473 case ISCSI_BOOT_ETH_MAC: 474 rc = beiscsi_get_macaddr(str, phba); 475 break; 476 default: 477 rc = -ENOSYS; 478 break; 479 } 480 return rc; 481 } 482 483 484 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type) 485 { 486 umode_t rc; 487 488 switch (type) { 489 case ISCSI_BOOT_TGT_NAME: 490 case ISCSI_BOOT_TGT_IP_ADDR: 491 case ISCSI_BOOT_TGT_PORT: 492 case ISCSI_BOOT_TGT_CHAP_NAME: 493 case ISCSI_BOOT_TGT_CHAP_SECRET: 494 case ISCSI_BOOT_TGT_REV_CHAP_NAME: 495 case ISCSI_BOOT_TGT_REV_CHAP_SECRET: 496 case ISCSI_BOOT_TGT_NIC_ASSOC: 497 case ISCSI_BOOT_TGT_FLAGS: 498 rc = S_IRUGO; 499 break; 500 default: 501 rc = 0; 502 break; 503 } 504 return rc; 505 } 506 507 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type) 508 { 509 umode_t rc; 510 511 switch (type) { 512 case ISCSI_BOOT_INI_INITIATOR_NAME: 513 rc = S_IRUGO; 514 break; 515 default: 516 rc = 0; 517 break; 518 } 519 return rc; 520 } 521 522 523 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type) 524 { 525 umode_t rc; 526 527 switch (type) { 528 case ISCSI_BOOT_ETH_FLAGS: 529 case ISCSI_BOOT_ETH_MAC: 530 case ISCSI_BOOT_ETH_INDEX: 531 rc = S_IRUGO; 532 break; 533 default: 534 rc = 0; 535 break; 536 } 537 return rc; 538 } 539 540 /*------------------- PCI Driver operations and data ----------------- */ 541 static const struct pci_device_id beiscsi_pci_id_table[] = { 542 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 543 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 544 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 545 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 546 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) }, 547 { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) }, 548 { 0 } 549 }; 550 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table); 551 552 553 static struct scsi_host_template beiscsi_sht = { 554 .module = THIS_MODULE, 555 .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver", 556 .proc_name = DRV_NAME, 557 .queuecommand = iscsi_queuecommand, 558 .change_queue_depth = scsi_change_queue_depth, 559 .slave_configure = beiscsi_slave_configure, 560 .target_alloc = iscsi_target_alloc, 561 .eh_abort_handler = beiscsi_eh_abort, 562 .eh_device_reset_handler = beiscsi_eh_device_reset, 563 .eh_target_reset_handler = iscsi_eh_session_reset, 564 .shost_attrs = beiscsi_attrs, 565 .sg_tablesize = BEISCSI_SGLIST_ELEMENTS, 566 .can_queue = BE2_IO_DEPTH, 567 .this_id = -1, 568 .max_sectors = BEISCSI_MAX_SECTORS, 569 .cmd_per_lun = BEISCSI_CMD_PER_LUN, 570 .use_clustering = ENABLE_CLUSTERING, 571 .vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID, 572 .track_queue_depth = 1, 573 }; 574 575 static struct scsi_transport_template *beiscsi_scsi_transport; 576 577 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) 578 { 579 struct beiscsi_hba *phba; 580 struct Scsi_Host *shost; 581 582 shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0); 583 if (!shost) { 584 dev_err(&pcidev->dev, 585 "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); 586 return NULL; 587 } 588 shost->max_id = BE2_MAX_SESSIONS; 589 shost->max_channel = 0; 590 shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; 591 shost->max_lun = BEISCSI_NUM_MAX_LUN; 592 shost->transportt = beiscsi_scsi_transport; 593 phba = iscsi_host_priv(shost); 594 memset(phba, 0, sizeof(*phba)); 595 phba->shost = shost; 596 phba->pcidev = pci_dev_get(pcidev); 597 pci_set_drvdata(pcidev, phba); 598 phba->interface_handle = 0xFFFFFFFF; 599 600 return phba; 601 } 602 603 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba) 604 { 605 if (phba->csr_va) { 606 iounmap(phba->csr_va); 607 phba->csr_va = NULL; 608 } 609 if (phba->db_va) { 610 iounmap(phba->db_va); 611 phba->db_va = NULL; 612 } 613 if (phba->pci_va) { 614 iounmap(phba->pci_va); 615 phba->pci_va = NULL; 616 } 617 } 618 619 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba, 620 struct pci_dev *pcidev) 621 { 622 u8 __iomem *addr; 623 int pcicfg_reg; 624 625 addr = ioremap_nocache(pci_resource_start(pcidev, 2), 626 pci_resource_len(pcidev, 2)); 627 if (addr == NULL) 628 return -ENOMEM; 629 phba->ctrl.csr = addr; 630 phba->csr_va = addr; 631 phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2); 632 633 addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024); 634 if (addr == NULL) 635 goto pci_map_err; 636 phba->ctrl.db = addr; 637 phba->db_va = addr; 638 phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4); 639 640 if (phba->generation == BE_GEN2) 641 pcicfg_reg = 1; 642 else 643 pcicfg_reg = 0; 644 645 addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg), 646 pci_resource_len(pcidev, pcicfg_reg)); 647 648 if (addr == NULL) 649 goto pci_map_err; 650 phba->ctrl.pcicfg = addr; 651 phba->pci_va = addr; 652 phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg); 653 return 0; 654 655 pci_map_err: 656 beiscsi_unmap_pci_function(phba); 657 return -ENOMEM; 658 } 659 660 static int beiscsi_enable_pci(struct pci_dev *pcidev) 661 { 662 int ret; 663 664 ret = pci_enable_device(pcidev); 665 if (ret) { 666 dev_err(&pcidev->dev, 667 "beiscsi_enable_pci - enable device failed\n"); 668 return ret; 669 } 670 671 ret = pci_request_regions(pcidev, DRV_NAME); 672 if (ret) { 673 dev_err(&pcidev->dev, 674 "beiscsi_enable_pci - request region failed\n"); 675 goto pci_dev_disable; 676 } 677 678 pci_set_master(pcidev); 679 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)); 680 if (ret) { 681 ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32)); 682 if (ret) { 683 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 684 goto pci_region_release; 685 } else { 686 ret = pci_set_consistent_dma_mask(pcidev, 687 DMA_BIT_MASK(32)); 688 } 689 } else { 690 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64)); 691 if (ret) { 692 dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n"); 693 goto pci_region_release; 694 } 695 } 696 return 0; 697 698 pci_region_release: 699 pci_release_regions(pcidev); 700 pci_dev_disable: 701 pci_disable_device(pcidev); 702 703 return ret; 704 } 705 706 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev) 707 { 708 struct be_ctrl_info *ctrl = &phba->ctrl; 709 struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced; 710 struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem; 711 int status = 0; 712 713 ctrl->pdev = pdev; 714 status = beiscsi_map_pci_bars(phba, pdev); 715 if (status) 716 return status; 717 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16; 718 mbox_mem_alloc->va = pci_alloc_consistent(pdev, 719 mbox_mem_alloc->size, 720 &mbox_mem_alloc->dma); 721 if (!mbox_mem_alloc->va) { 722 beiscsi_unmap_pci_function(phba); 723 return -ENOMEM; 724 } 725 726 mbox_mem_align->size = sizeof(struct be_mcc_mailbox); 727 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16); 728 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16); 729 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox)); 730 spin_lock_init(&ctrl->mbox_lock); 731 spin_lock_init(&phba->ctrl.mcc_lock); 732 spin_lock_init(&phba->ctrl.mcc_cq_lock); 733 734 return status; 735 } 736 737 /** 738 * beiscsi_get_params()- Set the config paramters 739 * @phba: ptr device priv structure 740 **/ 741 static void beiscsi_get_params(struct beiscsi_hba *phba) 742 { 743 uint32_t total_cid_count = 0; 744 uint32_t total_icd_count = 0; 745 uint8_t ulp_num = 0; 746 747 total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) + 748 BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1); 749 750 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 751 uint32_t align_mask = 0; 752 uint32_t icd_post_per_page = 0; 753 uint32_t icd_count_unavailable = 0; 754 uint32_t icd_start = 0, icd_count = 0; 755 uint32_t icd_start_align = 0, icd_count_align = 0; 756 757 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 758 icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 759 icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 760 761 /* Get ICD count that can be posted on each page */ 762 icd_post_per_page = (PAGE_SIZE / (BE2_SGE * 763 sizeof(struct iscsi_sge))); 764 align_mask = (icd_post_per_page - 1); 765 766 /* Check if icd_start is aligned ICD per page posting */ 767 if (icd_start % icd_post_per_page) { 768 icd_start_align = ((icd_start + 769 icd_post_per_page) & 770 ~(align_mask)); 771 phba->fw_config. 772 iscsi_icd_start[ulp_num] = 773 icd_start_align; 774 } 775 776 icd_count_align = (icd_count & ~align_mask); 777 778 /* ICD discarded in the process of alignment */ 779 if (icd_start_align) 780 icd_count_unavailable = ((icd_start_align - 781 icd_start) + 782 (icd_count - 783 icd_count_align)); 784 785 /* Updated ICD count available */ 786 phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count - 787 icd_count_unavailable); 788 789 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 790 "BM_%d : Aligned ICD values\n" 791 "\t ICD Start : %d\n" 792 "\t ICD Count : %d\n" 793 "\t ICD Discarded : %d\n", 794 phba->fw_config. 795 iscsi_icd_start[ulp_num], 796 phba->fw_config. 797 iscsi_icd_count[ulp_num], 798 icd_count_unavailable); 799 break; 800 } 801 } 802 803 total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num]; 804 phba->params.ios_per_ctrl = (total_icd_count - 805 (total_cid_count + 806 BE2_TMFS + BE2_NOPOUT_REQ)); 807 phba->params.cxns_per_ctrl = total_cid_count; 808 phba->params.asyncpdus_per_ctrl = total_cid_count; 809 phba->params.icds_per_ctrl = total_icd_count; 810 phba->params.num_sge_per_io = BE2_SGE; 811 phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; 812 phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ; 813 phba->params.eq_timer = 64; 814 phba->params.num_eq_entries = 1024; 815 phba->params.num_cq_entries = 1024; 816 phba->params.wrbs_per_cxn = 256; 817 } 818 819 static void hwi_ring_eq_db(struct beiscsi_hba *phba, 820 unsigned int id, unsigned int clr_interrupt, 821 unsigned int num_processed, 822 unsigned char rearm, unsigned char event) 823 { 824 u32 val = 0; 825 826 if (rearm) 827 val |= 1 << DB_EQ_REARM_SHIFT; 828 if (clr_interrupt) 829 val |= 1 << DB_EQ_CLR_SHIFT; 830 if (event) 831 val |= 1 << DB_EQ_EVNT_SHIFT; 832 833 val |= num_processed << DB_EQ_NUM_POPPED_SHIFT; 834 /* Setting lower order EQ_ID Bits */ 835 val |= (id & DB_EQ_RING_ID_LOW_MASK); 836 837 /* Setting Higher order EQ_ID Bits */ 838 val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) & 839 DB_EQ_RING_ID_HIGH_MASK) 840 << DB_EQ_HIGH_SET_SHIFT); 841 842 iowrite32(val, phba->db_va + DB_EQ_OFFSET); 843 } 844 845 /** 846 * be_isr_mcc - The isr routine of the driver. 847 * @irq: Not used 848 * @dev_id: Pointer to host adapter structure 849 */ 850 static irqreturn_t be_isr_mcc(int irq, void *dev_id) 851 { 852 struct beiscsi_hba *phba; 853 struct be_eq_entry *eqe = NULL; 854 struct be_queue_info *eq; 855 struct be_queue_info *mcc; 856 unsigned int num_eq_processed; 857 struct be_eq_obj *pbe_eq; 858 unsigned long flags; 859 860 pbe_eq = dev_id; 861 eq = &pbe_eq->q; 862 phba = pbe_eq->phba; 863 mcc = &phba->ctrl.mcc_obj.cq; 864 eqe = queue_tail_node(eq); 865 866 num_eq_processed = 0; 867 868 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 869 & EQE_VALID_MASK) { 870 if (((eqe->dw[offsetof(struct amap_eq_entry, 871 resource_id) / 32] & 872 EQE_RESID_MASK) >> 16) == mcc->id) { 873 spin_lock_irqsave(&phba->isr_lock, flags); 874 pbe_eq->todo_mcc_cq = true; 875 spin_unlock_irqrestore(&phba->isr_lock, flags); 876 } 877 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 878 queue_tail_inc(eq); 879 eqe = queue_tail_node(eq); 880 num_eq_processed++; 881 } 882 if (pbe_eq->todo_mcc_cq) 883 queue_work(phba->wq, &pbe_eq->work_cqs); 884 if (num_eq_processed) 885 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1); 886 887 return IRQ_HANDLED; 888 } 889 890 /** 891 * be_isr_msix - The isr routine of the driver. 892 * @irq: Not used 893 * @dev_id: Pointer to host adapter structure 894 */ 895 static irqreturn_t be_isr_msix(int irq, void *dev_id) 896 { 897 struct beiscsi_hba *phba; 898 struct be_eq_entry *eqe = NULL; 899 struct be_queue_info *eq; 900 struct be_queue_info *cq; 901 unsigned int num_eq_processed; 902 struct be_eq_obj *pbe_eq; 903 904 pbe_eq = dev_id; 905 eq = &pbe_eq->q; 906 cq = pbe_eq->cq; 907 eqe = queue_tail_node(eq); 908 909 phba = pbe_eq->phba; 910 num_eq_processed = 0; 911 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 912 & EQE_VALID_MASK) { 913 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) 914 blk_iopoll_sched(&pbe_eq->iopoll); 915 916 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 917 queue_tail_inc(eq); 918 eqe = queue_tail_node(eq); 919 num_eq_processed++; 920 } 921 922 if (num_eq_processed) 923 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1); 924 925 return IRQ_HANDLED; 926 } 927 928 /** 929 * be_isr - The isr routine of the driver. 930 * @irq: Not used 931 * @dev_id: Pointer to host adapter structure 932 */ 933 static irqreturn_t be_isr(int irq, void *dev_id) 934 { 935 struct beiscsi_hba *phba; 936 struct hwi_controller *phwi_ctrlr; 937 struct hwi_context_memory *phwi_context; 938 struct be_eq_entry *eqe = NULL; 939 struct be_queue_info *eq; 940 struct be_queue_info *mcc; 941 unsigned long flags, index; 942 unsigned int num_mcceq_processed, num_ioeq_processed; 943 struct be_ctrl_info *ctrl; 944 struct be_eq_obj *pbe_eq; 945 int isr; 946 947 phba = dev_id; 948 ctrl = &phba->ctrl; 949 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET + 950 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE)); 951 if (!isr) 952 return IRQ_NONE; 953 954 phwi_ctrlr = phba->phwi_ctrlr; 955 phwi_context = phwi_ctrlr->phwi_ctxt; 956 pbe_eq = &phwi_context->be_eq[0]; 957 958 eq = &phwi_context->be_eq[0].q; 959 mcc = &phba->ctrl.mcc_obj.cq; 960 index = 0; 961 eqe = queue_tail_node(eq); 962 963 num_ioeq_processed = 0; 964 num_mcceq_processed = 0; 965 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 966 & EQE_VALID_MASK) { 967 if (((eqe->dw[offsetof(struct amap_eq_entry, 968 resource_id) / 32] & 969 EQE_RESID_MASK) >> 16) == mcc->id) { 970 spin_lock_irqsave(&phba->isr_lock, flags); 971 pbe_eq->todo_mcc_cq = true; 972 spin_unlock_irqrestore(&phba->isr_lock, flags); 973 num_mcceq_processed++; 974 } else { 975 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll)) 976 blk_iopoll_sched(&pbe_eq->iopoll); 977 num_ioeq_processed++; 978 } 979 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 980 queue_tail_inc(eq); 981 eqe = queue_tail_node(eq); 982 } 983 if (num_ioeq_processed || num_mcceq_processed) { 984 if (pbe_eq->todo_mcc_cq) 985 queue_work(phba->wq, &pbe_eq->work_cqs); 986 987 if ((num_mcceq_processed) && (!num_ioeq_processed)) 988 hwi_ring_eq_db(phba, eq->id, 0, 989 (num_ioeq_processed + 990 num_mcceq_processed) , 1, 1); 991 else 992 hwi_ring_eq_db(phba, eq->id, 0, 993 (num_ioeq_processed + 994 num_mcceq_processed), 0, 1); 995 996 return IRQ_HANDLED; 997 } else 998 return IRQ_NONE; 999 } 1000 1001 static int beiscsi_init_irqs(struct beiscsi_hba *phba) 1002 { 1003 struct pci_dev *pcidev = phba->pcidev; 1004 struct hwi_controller *phwi_ctrlr; 1005 struct hwi_context_memory *phwi_context; 1006 int ret, msix_vec, i, j; 1007 1008 phwi_ctrlr = phba->phwi_ctrlr; 1009 phwi_context = phwi_ctrlr->phwi_ctxt; 1010 1011 if (phba->msix_enabled) { 1012 for (i = 0; i < phba->num_cpus; i++) { 1013 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, 1014 GFP_KERNEL); 1015 if (!phba->msi_name[i]) { 1016 ret = -ENOMEM; 1017 goto free_msix_irqs; 1018 } 1019 1020 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x", 1021 phba->shost->host_no, i); 1022 msix_vec = phba->msix_entries[i].vector; 1023 ret = request_irq(msix_vec, be_isr_msix, 0, 1024 phba->msi_name[i], 1025 &phwi_context->be_eq[i]); 1026 if (ret) { 1027 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 1028 "BM_%d : beiscsi_init_irqs-Failed to" 1029 "register msix for i = %d\n", 1030 i); 1031 kfree(phba->msi_name[i]); 1032 goto free_msix_irqs; 1033 } 1034 } 1035 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL); 1036 if (!phba->msi_name[i]) { 1037 ret = -ENOMEM; 1038 goto free_msix_irqs; 1039 } 1040 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x", 1041 phba->shost->host_no); 1042 msix_vec = phba->msix_entries[i].vector; 1043 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i], 1044 &phwi_context->be_eq[i]); 1045 if (ret) { 1046 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT , 1047 "BM_%d : beiscsi_init_irqs-" 1048 "Failed to register beiscsi_msix_mcc\n"); 1049 kfree(phba->msi_name[i]); 1050 goto free_msix_irqs; 1051 } 1052 1053 } else { 1054 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED, 1055 "beiscsi", phba); 1056 if (ret) { 1057 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 1058 "BM_%d : beiscsi_init_irqs-" 1059 "Failed to register irq\\n"); 1060 return ret; 1061 } 1062 } 1063 return 0; 1064 free_msix_irqs: 1065 for (j = i - 1; j >= 0; j--) { 1066 kfree(phba->msi_name[j]); 1067 msix_vec = phba->msix_entries[j].vector; 1068 free_irq(msix_vec, &phwi_context->be_eq[j]); 1069 } 1070 return ret; 1071 } 1072 1073 void hwi_ring_cq_db(struct beiscsi_hba *phba, 1074 unsigned int id, unsigned int num_processed, 1075 unsigned char rearm, unsigned char event) 1076 { 1077 u32 val = 0; 1078 1079 if (rearm) 1080 val |= 1 << DB_CQ_REARM_SHIFT; 1081 1082 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT; 1083 1084 /* Setting lower order CQ_ID Bits */ 1085 val |= (id & DB_CQ_RING_ID_LOW_MASK); 1086 1087 /* Setting Higher order CQ_ID Bits */ 1088 val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) & 1089 DB_CQ_RING_ID_HIGH_MASK) 1090 << DB_CQ_HIGH_SET_SHIFT); 1091 1092 iowrite32(val, phba->db_va + DB_CQ_OFFSET); 1093 } 1094 1095 static unsigned int 1096 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, 1097 struct beiscsi_hba *phba, 1098 struct pdu_base *ppdu, 1099 unsigned long pdu_len, 1100 void *pbuffer, unsigned long buf_len) 1101 { 1102 struct iscsi_conn *conn = beiscsi_conn->conn; 1103 struct iscsi_session *session = conn->session; 1104 struct iscsi_task *task; 1105 struct beiscsi_io_task *io_task; 1106 struct iscsi_hdr *login_hdr; 1107 1108 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] & 1109 PDUBASE_OPCODE_MASK) { 1110 case ISCSI_OP_NOOP_IN: 1111 pbuffer = NULL; 1112 buf_len = 0; 1113 break; 1114 case ISCSI_OP_ASYNC_EVENT: 1115 break; 1116 case ISCSI_OP_REJECT: 1117 WARN_ON(!pbuffer); 1118 WARN_ON(!(buf_len == 48)); 1119 beiscsi_log(phba, KERN_ERR, 1120 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1121 "BM_%d : In ISCSI_OP_REJECT\n"); 1122 break; 1123 case ISCSI_OP_LOGIN_RSP: 1124 case ISCSI_OP_TEXT_RSP: 1125 task = conn->login_task; 1126 io_task = task->dd_data; 1127 login_hdr = (struct iscsi_hdr *)ppdu; 1128 login_hdr->itt = io_task->libiscsi_itt; 1129 break; 1130 default: 1131 beiscsi_log(phba, KERN_WARNING, 1132 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1133 "BM_%d : Unrecognized opcode 0x%x in async msg\n", 1134 (ppdu-> 1135 dw[offsetof(struct amap_pdu_base, opcode) / 32] 1136 & PDUBASE_OPCODE_MASK)); 1137 return 1; 1138 } 1139 1140 spin_lock_bh(&session->back_lock); 1141 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len); 1142 spin_unlock_bh(&session->back_lock); 1143 return 0; 1144 } 1145 1146 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba) 1147 { 1148 struct sgl_handle *psgl_handle; 1149 1150 if (phba->io_sgl_hndl_avbl) { 1151 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1152 "BM_%d : In alloc_io_sgl_handle," 1153 " io_sgl_alloc_index=%d\n", 1154 phba->io_sgl_alloc_index); 1155 1156 psgl_handle = phba->io_sgl_hndl_base[phba-> 1157 io_sgl_alloc_index]; 1158 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL; 1159 phba->io_sgl_hndl_avbl--; 1160 if (phba->io_sgl_alloc_index == (phba->params. 1161 ios_per_ctrl - 1)) 1162 phba->io_sgl_alloc_index = 0; 1163 else 1164 phba->io_sgl_alloc_index++; 1165 } else 1166 psgl_handle = NULL; 1167 return psgl_handle; 1168 } 1169 1170 static void 1171 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1172 { 1173 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1174 "BM_%d : In free_,io_sgl_free_index=%d\n", 1175 phba->io_sgl_free_index); 1176 1177 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) { 1178 /* 1179 * this can happen if clean_task is called on a task that 1180 * failed in xmit_task or alloc_pdu. 1181 */ 1182 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO, 1183 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d," 1184 "value there=%p\n", phba->io_sgl_free_index, 1185 phba->io_sgl_hndl_base 1186 [phba->io_sgl_free_index]); 1187 return; 1188 } 1189 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle; 1190 phba->io_sgl_hndl_avbl++; 1191 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1)) 1192 phba->io_sgl_free_index = 0; 1193 else 1194 phba->io_sgl_free_index++; 1195 } 1196 1197 /** 1198 * alloc_wrb_handle - To allocate a wrb handle 1199 * @phba: The hba pointer 1200 * @cid: The cid to use for allocation 1201 * 1202 * This happens under session_lock until submission to chip 1203 */ 1204 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) 1205 { 1206 struct hwi_wrb_context *pwrb_context; 1207 struct hwi_controller *phwi_ctrlr; 1208 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; 1209 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); 1210 1211 phwi_ctrlr = phba->phwi_ctrlr; 1212 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1213 if (pwrb_context->wrb_handles_available >= 2) { 1214 pwrb_handle = pwrb_context->pwrb_handle_base[ 1215 pwrb_context->alloc_index]; 1216 pwrb_context->wrb_handles_available--; 1217 if (pwrb_context->alloc_index == 1218 (phba->params.wrbs_per_cxn - 1)) 1219 pwrb_context->alloc_index = 0; 1220 else 1221 pwrb_context->alloc_index++; 1222 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[ 1223 pwrb_context->alloc_index]; 1224 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index; 1225 } else 1226 pwrb_handle = NULL; 1227 return pwrb_handle; 1228 } 1229 1230 /** 1231 * free_wrb_handle - To free the wrb handle back to pool 1232 * @phba: The hba pointer 1233 * @pwrb_context: The context to free from 1234 * @pwrb_handle: The wrb_handle to free 1235 * 1236 * This happens under session_lock until submission to chip 1237 */ 1238 static void 1239 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context, 1240 struct wrb_handle *pwrb_handle) 1241 { 1242 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle; 1243 pwrb_context->wrb_handles_available++; 1244 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1)) 1245 pwrb_context->free_index = 0; 1246 else 1247 pwrb_context->free_index++; 1248 1249 beiscsi_log(phba, KERN_INFO, 1250 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1251 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x" 1252 "wrb_handles_available=%d\n", 1253 pwrb_handle, pwrb_context->free_index, 1254 pwrb_context->wrb_handles_available); 1255 } 1256 1257 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba) 1258 { 1259 struct sgl_handle *psgl_handle; 1260 1261 if (phba->eh_sgl_hndl_avbl) { 1262 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index]; 1263 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL; 1264 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1265 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n", 1266 phba->eh_sgl_alloc_index, 1267 phba->eh_sgl_alloc_index); 1268 1269 phba->eh_sgl_hndl_avbl--; 1270 if (phba->eh_sgl_alloc_index == 1271 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1272 1)) 1273 phba->eh_sgl_alloc_index = 0; 1274 else 1275 phba->eh_sgl_alloc_index++; 1276 } else 1277 psgl_handle = NULL; 1278 return psgl_handle; 1279 } 1280 1281 void 1282 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle) 1283 { 1284 1285 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, 1286 "BM_%d : In free_mgmt_sgl_handle," 1287 "eh_sgl_free_index=%d\n", 1288 phba->eh_sgl_free_index); 1289 1290 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) { 1291 /* 1292 * this can happen if clean_task is called on a task that 1293 * failed in xmit_task or alloc_pdu. 1294 */ 1295 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, 1296 "BM_%d : Double Free in eh SGL ," 1297 "eh_sgl_free_index=%d\n", 1298 phba->eh_sgl_free_index); 1299 return; 1300 } 1301 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle; 1302 phba->eh_sgl_hndl_avbl++; 1303 if (phba->eh_sgl_free_index == 1304 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1)) 1305 phba->eh_sgl_free_index = 0; 1306 else 1307 phba->eh_sgl_free_index++; 1308 } 1309 1310 static void 1311 be_complete_io(struct beiscsi_conn *beiscsi_conn, 1312 struct iscsi_task *task, 1313 struct common_sol_cqe *csol_cqe) 1314 { 1315 struct beiscsi_io_task *io_task = task->dd_data; 1316 struct be_status_bhs *sts_bhs = 1317 (struct be_status_bhs *)io_task->cmd_bhs; 1318 struct iscsi_conn *conn = beiscsi_conn->conn; 1319 unsigned char *sense; 1320 u32 resid = 0, exp_cmdsn, max_cmdsn; 1321 u8 rsp, status, flags; 1322 1323 exp_cmdsn = csol_cqe->exp_cmdsn; 1324 max_cmdsn = (csol_cqe->exp_cmdsn + 1325 csol_cqe->cmd_wnd - 1); 1326 rsp = csol_cqe->i_resp; 1327 status = csol_cqe->i_sts; 1328 flags = csol_cqe->i_flags; 1329 resid = csol_cqe->res_cnt; 1330 1331 if (!task->sc) { 1332 if (io_task->scsi_cmnd) { 1333 scsi_dma_unmap(io_task->scsi_cmnd); 1334 io_task->scsi_cmnd = NULL; 1335 } 1336 1337 return; 1338 } 1339 task->sc->result = (DID_OK << 16) | status; 1340 if (rsp != ISCSI_STATUS_CMD_COMPLETED) { 1341 task->sc->result = DID_ERROR << 16; 1342 goto unmap; 1343 } 1344 1345 /* bidi not initially supported */ 1346 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) { 1347 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW)) 1348 task->sc->result = DID_ERROR << 16; 1349 1350 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) { 1351 scsi_set_resid(task->sc, resid); 1352 if (!status && (scsi_bufflen(task->sc) - resid < 1353 task->sc->underflow)) 1354 task->sc->result = DID_ERROR << 16; 1355 } 1356 } 1357 1358 if (status == SAM_STAT_CHECK_CONDITION) { 1359 u16 sense_len; 1360 unsigned short *slen = (unsigned short *)sts_bhs->sense_info; 1361 1362 sense = sts_bhs->sense_info + sizeof(unsigned short); 1363 sense_len = be16_to_cpu(*slen); 1364 memcpy(task->sc->sense_buffer, sense, 1365 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE)); 1366 } 1367 1368 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) 1369 conn->rxdata_octets += resid; 1370 unmap: 1371 if (io_task->scsi_cmnd) { 1372 scsi_dma_unmap(io_task->scsi_cmnd); 1373 io_task->scsi_cmnd = NULL; 1374 } 1375 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn); 1376 } 1377 1378 static void 1379 be_complete_logout(struct beiscsi_conn *beiscsi_conn, 1380 struct iscsi_task *task, 1381 struct common_sol_cqe *csol_cqe) 1382 { 1383 struct iscsi_logout_rsp *hdr; 1384 struct beiscsi_io_task *io_task = task->dd_data; 1385 struct iscsi_conn *conn = beiscsi_conn->conn; 1386 1387 hdr = (struct iscsi_logout_rsp *)task->hdr; 1388 hdr->opcode = ISCSI_OP_LOGOUT_RSP; 1389 hdr->t2wait = 5; 1390 hdr->t2retain = 0; 1391 hdr->flags = csol_cqe->i_flags; 1392 hdr->response = csol_cqe->i_resp; 1393 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1394 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1395 csol_cqe->cmd_wnd - 1); 1396 1397 hdr->dlength[0] = 0; 1398 hdr->dlength[1] = 0; 1399 hdr->dlength[2] = 0; 1400 hdr->hlength = 0; 1401 hdr->itt = io_task->libiscsi_itt; 1402 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1403 } 1404 1405 static void 1406 be_complete_tmf(struct beiscsi_conn *beiscsi_conn, 1407 struct iscsi_task *task, 1408 struct common_sol_cqe *csol_cqe) 1409 { 1410 struct iscsi_tm_rsp *hdr; 1411 struct iscsi_conn *conn = beiscsi_conn->conn; 1412 struct beiscsi_io_task *io_task = task->dd_data; 1413 1414 hdr = (struct iscsi_tm_rsp *)task->hdr; 1415 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; 1416 hdr->flags = csol_cqe->i_flags; 1417 hdr->response = csol_cqe->i_resp; 1418 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1419 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1420 csol_cqe->cmd_wnd - 1); 1421 1422 hdr->itt = io_task->libiscsi_itt; 1423 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1424 } 1425 1426 static void 1427 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, 1428 struct beiscsi_hba *phba, struct sol_cqe *psol) 1429 { 1430 struct hwi_wrb_context *pwrb_context; 1431 struct wrb_handle *pwrb_handle = NULL; 1432 struct hwi_controller *phwi_ctrlr; 1433 struct iscsi_task *task; 1434 struct beiscsi_io_task *io_task; 1435 uint16_t wrb_index, cid, cri_index; 1436 1437 phwi_ctrlr = phba->phwi_ctrlr; 1438 if (is_chip_be2_be3r(phba)) { 1439 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1440 wrb_idx, psol); 1441 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, 1442 cid, psol); 1443 } else { 1444 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1445 wrb_idx, psol); 1446 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, 1447 cid, psol); 1448 } 1449 1450 cri_index = BE_GET_CRI_FROM_CID(cid); 1451 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1452 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; 1453 task = pwrb_handle->pio_handle; 1454 1455 io_task = task->dd_data; 1456 memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb)); 1457 iscsi_put_task(task); 1458 } 1459 1460 static void 1461 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, 1462 struct iscsi_task *task, 1463 struct common_sol_cqe *csol_cqe) 1464 { 1465 struct iscsi_nopin *hdr; 1466 struct iscsi_conn *conn = beiscsi_conn->conn; 1467 struct beiscsi_io_task *io_task = task->dd_data; 1468 1469 hdr = (struct iscsi_nopin *)task->hdr; 1470 hdr->flags = csol_cqe->i_flags; 1471 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); 1472 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + 1473 csol_cqe->cmd_wnd - 1); 1474 1475 hdr->opcode = ISCSI_OP_NOOP_IN; 1476 hdr->itt = io_task->libiscsi_itt; 1477 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); 1478 } 1479 1480 static void adapter_get_sol_cqe(struct beiscsi_hba *phba, 1481 struct sol_cqe *psol, 1482 struct common_sol_cqe *csol_cqe) 1483 { 1484 if (is_chip_be2_be3r(phba)) { 1485 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, 1486 i_exp_cmd_sn, psol); 1487 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, 1488 i_res_cnt, psol); 1489 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, 1490 i_cmd_wnd, psol); 1491 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, 1492 wrb_index, psol); 1493 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, 1494 cid, psol); 1495 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1496 hw_sts, psol); 1497 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, 1498 i_resp, psol); 1499 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, 1500 i_sts, psol); 1501 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, 1502 i_flags, psol); 1503 } else { 1504 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1505 i_exp_cmd_sn, psol); 1506 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1507 i_res_cnt, psol); 1508 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1509 wrb_index, psol); 1510 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1511 cid, psol); 1512 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1513 hw_sts, psol); 1514 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1515 i_cmd_wnd, psol); 1516 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1517 cmd_cmpl, psol)) 1518 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1519 i_sts, psol); 1520 else 1521 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2, 1522 i_sts, psol); 1523 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1524 u, psol)) 1525 csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW; 1526 1527 if (AMAP_GET_BITS(struct amap_sol_cqe_v2, 1528 o, psol)) 1529 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; 1530 } 1531 } 1532 1533 1534 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, 1535 struct beiscsi_hba *phba, struct sol_cqe *psol) 1536 { 1537 struct hwi_wrb_context *pwrb_context; 1538 struct wrb_handle *pwrb_handle; 1539 struct iscsi_wrb *pwrb = NULL; 1540 struct hwi_controller *phwi_ctrlr; 1541 struct iscsi_task *task; 1542 unsigned int type; 1543 struct iscsi_conn *conn = beiscsi_conn->conn; 1544 struct iscsi_session *session = conn->session; 1545 struct common_sol_cqe csol_cqe = {0}; 1546 uint16_t cri_index = 0; 1547 1548 phwi_ctrlr = phba->phwi_ctrlr; 1549 1550 /* Copy the elements to a common structure */ 1551 adapter_get_sol_cqe(phba, psol, &csol_cqe); 1552 1553 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); 1554 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 1555 1556 pwrb_handle = pwrb_context->pwrb_handle_basestd[ 1557 csol_cqe.wrb_index]; 1558 1559 task = pwrb_handle->pio_handle; 1560 pwrb = pwrb_handle->pwrb; 1561 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type; 1562 1563 spin_lock_bh(&session->back_lock); 1564 switch (type) { 1565 case HWH_TYPE_IO: 1566 case HWH_TYPE_IO_RD: 1567 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == 1568 ISCSI_OP_NOOP_OUT) 1569 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1570 else 1571 be_complete_io(beiscsi_conn, task, &csol_cqe); 1572 break; 1573 1574 case HWH_TYPE_LOGOUT: 1575 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT) 1576 be_complete_logout(beiscsi_conn, task, &csol_cqe); 1577 else 1578 be_complete_tmf(beiscsi_conn, task, &csol_cqe); 1579 break; 1580 1581 case HWH_TYPE_LOGIN: 1582 beiscsi_log(phba, KERN_ERR, 1583 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1584 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in" 1585 " hwi_complete_cmd- Solicited path\n"); 1586 break; 1587 1588 case HWH_TYPE_NOP: 1589 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe); 1590 break; 1591 1592 default: 1593 beiscsi_log(phba, KERN_WARNING, 1594 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1595 "BM_%d : In hwi_complete_cmd, unknown type = %d" 1596 "wrb_index 0x%x CID 0x%x\n", type, 1597 csol_cqe.wrb_index, 1598 csol_cqe.cid); 1599 break; 1600 } 1601 1602 spin_unlock_bh(&session->back_lock); 1603 } 1604 1605 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context 1606 *pasync_ctx, unsigned int is_header, 1607 unsigned int host_write_ptr) 1608 { 1609 if (is_header) 1610 return &pasync_ctx->async_entry[host_write_ptr]. 1611 header_busy_list; 1612 else 1613 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list; 1614 } 1615 1616 static struct async_pdu_handle * 1617 hwi_get_async_handle(struct beiscsi_hba *phba, 1618 struct beiscsi_conn *beiscsi_conn, 1619 struct hwi_async_pdu_context *pasync_ctx, 1620 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index) 1621 { 1622 struct be_bus_address phys_addr; 1623 struct list_head *pbusy_list; 1624 struct async_pdu_handle *pasync_handle = NULL; 1625 unsigned char is_header = 0; 1626 unsigned int index, dpl; 1627 1628 if (is_chip_be2_be3r(phba)) { 1629 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1630 dpl, pdpdu_cqe); 1631 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, 1632 index, pdpdu_cqe); 1633 } else { 1634 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1635 dpl, pdpdu_cqe); 1636 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, 1637 index, pdpdu_cqe); 1638 } 1639 1640 phys_addr.u.a32.address_lo = 1641 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1642 db_addr_lo) / 32] - dpl); 1643 phys_addr.u.a32.address_hi = 1644 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1645 db_addr_hi) / 32]; 1646 1647 phys_addr.u.a64.address = 1648 *((unsigned long long *)(&phys_addr.u.a64.address)); 1649 1650 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32] 1651 & PDUCQE_CODE_MASK) { 1652 case UNSOL_HDR_NOTIFY: 1653 is_header = 1; 1654 1655 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1656 is_header, index); 1657 break; 1658 case UNSOL_DATA_NOTIFY: 1659 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1660 is_header, index); 1661 break; 1662 default: 1663 pbusy_list = NULL; 1664 beiscsi_log(phba, KERN_WARNING, 1665 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 1666 "BM_%d : Unexpected code=%d\n", 1667 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, 1668 code) / 32] & PDUCQE_CODE_MASK); 1669 return NULL; 1670 } 1671 1672 WARN_ON(list_empty(pbusy_list)); 1673 list_for_each_entry(pasync_handle, pbusy_list, link) { 1674 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address) 1675 break; 1676 } 1677 1678 WARN_ON(!pasync_handle); 1679 1680 pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID( 1681 beiscsi_conn->beiscsi_conn_cid); 1682 pasync_handle->is_header = is_header; 1683 pasync_handle->buffer_len = dpl; 1684 *pcq_index = index; 1685 1686 return pasync_handle; 1687 } 1688 1689 static unsigned int 1690 hwi_update_async_writables(struct beiscsi_hba *phba, 1691 struct hwi_async_pdu_context *pasync_ctx, 1692 unsigned int is_header, unsigned int cq_index) 1693 { 1694 struct list_head *pbusy_list; 1695 struct async_pdu_handle *pasync_handle; 1696 unsigned int num_entries, writables = 0; 1697 unsigned int *pep_read_ptr, *pwritables; 1698 1699 num_entries = pasync_ctx->num_entries; 1700 if (is_header) { 1701 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr; 1702 pwritables = &pasync_ctx->async_header.writables; 1703 } else { 1704 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr; 1705 pwritables = &pasync_ctx->async_data.writables; 1706 } 1707 1708 while ((*pep_read_ptr) != cq_index) { 1709 (*pep_read_ptr)++; 1710 *pep_read_ptr = (*pep_read_ptr) % num_entries; 1711 1712 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header, 1713 *pep_read_ptr); 1714 if (writables == 0) 1715 WARN_ON(list_empty(pbusy_list)); 1716 1717 if (!list_empty(pbusy_list)) { 1718 pasync_handle = list_entry(pbusy_list->next, 1719 struct async_pdu_handle, 1720 link); 1721 WARN_ON(!pasync_handle); 1722 pasync_handle->consumed = 1; 1723 } 1724 1725 writables++; 1726 } 1727 1728 if (!writables) { 1729 beiscsi_log(phba, KERN_ERR, 1730 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 1731 "BM_%d : Duplicate notification received - index 0x%x!!\n", 1732 cq_index); 1733 WARN_ON(1); 1734 } 1735 1736 *pwritables = *pwritables + writables; 1737 return 0; 1738 } 1739 1740 static void hwi_free_async_msg(struct beiscsi_hba *phba, 1741 struct hwi_async_pdu_context *pasync_ctx, 1742 unsigned int cri) 1743 { 1744 struct async_pdu_handle *pasync_handle, *tmp_handle; 1745 struct list_head *plist; 1746 1747 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1748 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) { 1749 list_del(&pasync_handle->link); 1750 1751 if (pasync_handle->is_header) { 1752 list_add_tail(&pasync_handle->link, 1753 &pasync_ctx->async_header.free_list); 1754 pasync_ctx->async_header.free_entries++; 1755 } else { 1756 list_add_tail(&pasync_handle->link, 1757 &pasync_ctx->async_data.free_list); 1758 pasync_ctx->async_data.free_entries++; 1759 } 1760 } 1761 1762 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list); 1763 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0; 1764 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0; 1765 } 1766 1767 static struct phys_addr * 1768 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx, 1769 unsigned int is_header, unsigned int host_write_ptr) 1770 { 1771 struct phys_addr *pasync_sge = NULL; 1772 1773 if (is_header) 1774 pasync_sge = pasync_ctx->async_header.ring_base; 1775 else 1776 pasync_sge = pasync_ctx->async_data.ring_base; 1777 1778 return pasync_sge + host_write_ptr; 1779 } 1780 1781 static void hwi_post_async_buffers(struct beiscsi_hba *phba, 1782 unsigned int is_header, uint8_t ulp_num) 1783 { 1784 struct hwi_controller *phwi_ctrlr; 1785 struct hwi_async_pdu_context *pasync_ctx; 1786 struct async_pdu_handle *pasync_handle; 1787 struct list_head *pfree_link, *pbusy_list; 1788 struct phys_addr *pasync_sge; 1789 unsigned int ring_id, num_entries; 1790 unsigned int host_write_num, doorbell_offset; 1791 unsigned int writables; 1792 unsigned int i = 0; 1793 u32 doorbell = 0; 1794 1795 phwi_ctrlr = phba->phwi_ctrlr; 1796 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num); 1797 num_entries = pasync_ctx->num_entries; 1798 1799 if (is_header) { 1800 writables = min(pasync_ctx->async_header.writables, 1801 pasync_ctx->async_header.free_entries); 1802 pfree_link = pasync_ctx->async_header.free_list.next; 1803 host_write_num = pasync_ctx->async_header.host_write_ptr; 1804 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id; 1805 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num]. 1806 doorbell_offset; 1807 } else { 1808 writables = min(pasync_ctx->async_data.writables, 1809 pasync_ctx->async_data.free_entries); 1810 pfree_link = pasync_ctx->async_data.free_list.next; 1811 host_write_num = pasync_ctx->async_data.host_write_ptr; 1812 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id; 1813 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num]. 1814 doorbell_offset; 1815 } 1816 1817 writables = (writables / 8) * 8; 1818 if (writables) { 1819 for (i = 0; i < writables; i++) { 1820 pbusy_list = 1821 hwi_get_async_busy_list(pasync_ctx, is_header, 1822 host_write_num); 1823 pasync_handle = 1824 list_entry(pfree_link, struct async_pdu_handle, 1825 link); 1826 WARN_ON(!pasync_handle); 1827 pasync_handle->consumed = 0; 1828 1829 pfree_link = pfree_link->next; 1830 1831 pasync_sge = hwi_get_ring_address(pasync_ctx, 1832 is_header, host_write_num); 1833 1834 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo; 1835 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi; 1836 1837 list_move(&pasync_handle->link, pbusy_list); 1838 1839 host_write_num++; 1840 host_write_num = host_write_num % num_entries; 1841 } 1842 1843 if (is_header) { 1844 pasync_ctx->async_header.host_write_ptr = 1845 host_write_num; 1846 pasync_ctx->async_header.free_entries -= writables; 1847 pasync_ctx->async_header.writables -= writables; 1848 pasync_ctx->async_header.busy_entries += writables; 1849 } else { 1850 pasync_ctx->async_data.host_write_ptr = host_write_num; 1851 pasync_ctx->async_data.free_entries -= writables; 1852 pasync_ctx->async_data.writables -= writables; 1853 pasync_ctx->async_data.busy_entries += writables; 1854 } 1855 1856 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK; 1857 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT; 1858 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT; 1859 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK) 1860 << DB_DEF_PDU_CQPROC_SHIFT; 1861 1862 iowrite32(doorbell, phba->db_va + doorbell_offset); 1863 } 1864 } 1865 1866 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba, 1867 struct beiscsi_conn *beiscsi_conn, 1868 struct i_t_dpdu_cqe *pdpdu_cqe) 1869 { 1870 struct hwi_controller *phwi_ctrlr; 1871 struct hwi_async_pdu_context *pasync_ctx; 1872 struct async_pdu_handle *pasync_handle = NULL; 1873 unsigned int cq_index = -1; 1874 uint16_t cri_index = BE_GET_CRI_FROM_CID( 1875 beiscsi_conn->beiscsi_conn_cid); 1876 1877 phwi_ctrlr = phba->phwi_ctrlr; 1878 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 1879 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1880 cri_index)); 1881 1882 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 1883 pdpdu_cqe, &cq_index); 1884 BUG_ON(pasync_handle->is_header != 0); 1885 if (pasync_handle->consumed == 0) 1886 hwi_update_async_writables(phba, pasync_ctx, 1887 pasync_handle->is_header, cq_index); 1888 1889 hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri); 1890 hwi_post_async_buffers(phba, pasync_handle->is_header, 1891 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1892 cri_index)); 1893 } 1894 1895 static unsigned int 1896 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, 1897 struct beiscsi_hba *phba, 1898 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri) 1899 { 1900 struct list_head *plist; 1901 struct async_pdu_handle *pasync_handle; 1902 void *phdr = NULL; 1903 unsigned int hdr_len = 0, buf_len = 0; 1904 unsigned int status, index = 0, offset = 0; 1905 void *pfirst_buffer = NULL; 1906 unsigned int num_buf = 0; 1907 1908 plist = &pasync_ctx->async_entry[cri].wait_queue.list; 1909 1910 list_for_each_entry(pasync_handle, plist, link) { 1911 if (index == 0) { 1912 phdr = pasync_handle->pbuffer; 1913 hdr_len = pasync_handle->buffer_len; 1914 } else { 1915 buf_len = pasync_handle->buffer_len; 1916 if (!num_buf) { 1917 pfirst_buffer = pasync_handle->pbuffer; 1918 num_buf++; 1919 } 1920 memcpy(pfirst_buffer + offset, 1921 pasync_handle->pbuffer, buf_len); 1922 offset += buf_len; 1923 } 1924 index++; 1925 } 1926 1927 status = beiscsi_process_async_pdu(beiscsi_conn, phba, 1928 phdr, hdr_len, pfirst_buffer, 1929 offset); 1930 1931 hwi_free_async_msg(phba, pasync_ctx, cri); 1932 return 0; 1933 } 1934 1935 static unsigned int 1936 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn, 1937 struct beiscsi_hba *phba, 1938 struct async_pdu_handle *pasync_handle) 1939 { 1940 struct hwi_async_pdu_context *pasync_ctx; 1941 struct hwi_controller *phwi_ctrlr; 1942 unsigned int bytes_needed = 0, status = 0; 1943 unsigned short cri = pasync_handle->cri; 1944 struct pdu_base *ppdu; 1945 1946 phwi_ctrlr = phba->phwi_ctrlr; 1947 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 1948 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 1949 BE_GET_CRI_FROM_CID(beiscsi_conn-> 1950 beiscsi_conn_cid))); 1951 1952 list_del(&pasync_handle->link); 1953 if (pasync_handle->is_header) { 1954 pasync_ctx->async_header.busy_entries--; 1955 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1956 hwi_free_async_msg(phba, pasync_ctx, cri); 1957 BUG(); 1958 } 1959 1960 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0; 1961 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1; 1962 pasync_ctx->async_entry[cri].wait_queue.hdr_len = 1963 (unsigned short)pasync_handle->buffer_len; 1964 list_add_tail(&pasync_handle->link, 1965 &pasync_ctx->async_entry[cri].wait_queue.list); 1966 1967 ppdu = pasync_handle->pbuffer; 1968 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base, 1969 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) & 1970 0xFFFF0000) | ((be16_to_cpu((ppdu-> 1971 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32] 1972 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF)); 1973 1974 if (status == 0) { 1975 pasync_ctx->async_entry[cri].wait_queue.bytes_needed = 1976 bytes_needed; 1977 1978 if (bytes_needed == 0) 1979 status = hwi_fwd_async_msg(beiscsi_conn, phba, 1980 pasync_ctx, cri); 1981 } 1982 } else { 1983 pasync_ctx->async_data.busy_entries--; 1984 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) { 1985 list_add_tail(&pasync_handle->link, 1986 &pasync_ctx->async_entry[cri].wait_queue. 1987 list); 1988 pasync_ctx->async_entry[cri].wait_queue. 1989 bytes_received += 1990 (unsigned short)pasync_handle->buffer_len; 1991 1992 if (pasync_ctx->async_entry[cri].wait_queue. 1993 bytes_received >= 1994 pasync_ctx->async_entry[cri].wait_queue. 1995 bytes_needed) 1996 status = hwi_fwd_async_msg(beiscsi_conn, phba, 1997 pasync_ctx, cri); 1998 } 1999 } 2000 return status; 2001 } 2002 2003 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn, 2004 struct beiscsi_hba *phba, 2005 struct i_t_dpdu_cqe *pdpdu_cqe) 2006 { 2007 struct hwi_controller *phwi_ctrlr; 2008 struct hwi_async_pdu_context *pasync_ctx; 2009 struct async_pdu_handle *pasync_handle = NULL; 2010 unsigned int cq_index = -1; 2011 uint16_t cri_index = BE_GET_CRI_FROM_CID( 2012 beiscsi_conn->beiscsi_conn_cid); 2013 2014 phwi_ctrlr = phba->phwi_ctrlr; 2015 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, 2016 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, 2017 cri_index)); 2018 2019 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx, 2020 pdpdu_cqe, &cq_index); 2021 2022 if (pasync_handle->consumed == 0) 2023 hwi_update_async_writables(phba, pasync_ctx, 2024 pasync_handle->is_header, cq_index); 2025 2026 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle); 2027 hwi_post_async_buffers(phba, pasync_handle->is_header, 2028 BEISCSI_GET_ULP_FROM_CRI( 2029 phwi_ctrlr, cri_index)); 2030 } 2031 2032 static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba) 2033 { 2034 struct be_queue_info *mcc_cq; 2035 struct be_mcc_compl *mcc_compl; 2036 unsigned int num_processed = 0; 2037 2038 mcc_cq = &phba->ctrl.mcc_obj.cq; 2039 mcc_compl = queue_tail_node(mcc_cq); 2040 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 2041 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) { 2042 2043 if (num_processed >= 32) { 2044 hwi_ring_cq_db(phba, mcc_cq->id, 2045 num_processed, 0, 0); 2046 num_processed = 0; 2047 } 2048 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) { 2049 /* Interpret flags as an async trailer */ 2050 if (is_link_state_evt(mcc_compl->flags)) 2051 /* Interpret compl as a async link evt */ 2052 beiscsi_async_link_state_process(phba, 2053 (struct be_async_event_link_state *) mcc_compl); 2054 else { 2055 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX, 2056 "BM_%d : Unsupported Async Event, flags" 2057 " = 0x%08x\n", 2058 mcc_compl->flags); 2059 if (phba->state & BE_ADAPTER_LINK_UP) { 2060 phba->state |= BE_ADAPTER_CHECK_BOOT; 2061 phba->get_boot = BE_GET_BOOT_RETRIES; 2062 } 2063 } 2064 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) { 2065 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl); 2066 atomic_dec(&phba->ctrl.mcc_obj.q.used); 2067 } 2068 2069 mcc_compl->flags = 0; 2070 queue_tail_inc(mcc_cq); 2071 mcc_compl = queue_tail_node(mcc_cq); 2072 mcc_compl->flags = le32_to_cpu(mcc_compl->flags); 2073 num_processed++; 2074 } 2075 2076 if (num_processed > 0) 2077 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0); 2078 2079 } 2080 2081 /** 2082 * beiscsi_process_cq()- Process the Completion Queue 2083 * @pbe_eq: Event Q on which the Completion has come 2084 * 2085 * return 2086 * Number of Completion Entries processed. 2087 **/ 2088 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 2089 { 2090 struct be_queue_info *cq; 2091 struct sol_cqe *sol; 2092 struct dmsg_cqe *dmsg; 2093 unsigned int num_processed = 0; 2094 unsigned int tot_nump = 0; 2095 unsigned short code = 0, cid = 0; 2096 uint16_t cri_index = 0; 2097 struct beiscsi_conn *beiscsi_conn; 2098 struct beiscsi_endpoint *beiscsi_ep; 2099 struct iscsi_endpoint *ep; 2100 struct beiscsi_hba *phba; 2101 2102 cq = pbe_eq->cq; 2103 sol = queue_tail_node(cq); 2104 phba = pbe_eq->phba; 2105 2106 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] & 2107 CQE_VALID_MASK) { 2108 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe)); 2109 2110 code = (sol->dw[offsetof(struct amap_sol_cqe, code) / 2111 32] & CQE_CODE_MASK); 2112 2113 /* Get the CID */ 2114 if (is_chip_be2_be3r(phba)) { 2115 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); 2116 } else { 2117 if ((code == DRIVERMSG_NOTIFY) || 2118 (code == UNSOL_HDR_NOTIFY) || 2119 (code == UNSOL_DATA_NOTIFY)) 2120 cid = AMAP_GET_BITS( 2121 struct amap_i_t_dpdu_cqe_v2, 2122 cid, sol); 2123 else 2124 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, 2125 cid, sol); 2126 } 2127 2128 cri_index = BE_GET_CRI_FROM_CID(cid); 2129 ep = phba->ep_array[cri_index]; 2130 2131 if (ep == NULL) { 2132 /* connection has already been freed 2133 * just move on to next one 2134 */ 2135 beiscsi_log(phba, KERN_WARNING, 2136 BEISCSI_LOG_INIT, 2137 "BM_%d : proc cqe of disconn ep: cid %d\n", 2138 cid); 2139 goto proc_next_cqe; 2140 } 2141 2142 beiscsi_ep = ep->dd_data; 2143 beiscsi_conn = beiscsi_ep->conn; 2144 2145 if (num_processed >= 32) { 2146 hwi_ring_cq_db(phba, cq->id, 2147 num_processed, 0, 0); 2148 tot_nump += num_processed; 2149 num_processed = 0; 2150 } 2151 2152 switch (code) { 2153 case SOL_CMD_COMPLETE: 2154 hwi_complete_cmd(beiscsi_conn, phba, sol); 2155 break; 2156 case DRIVERMSG_NOTIFY: 2157 beiscsi_log(phba, KERN_INFO, 2158 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2159 "BM_%d : Received %s[%d] on CID : %d\n", 2160 cqe_desc[code], code, cid); 2161 2162 dmsg = (struct dmsg_cqe *)sol; 2163 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol); 2164 break; 2165 case UNSOL_HDR_NOTIFY: 2166 beiscsi_log(phba, KERN_INFO, 2167 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2168 "BM_%d : Received %s[%d] on CID : %d\n", 2169 cqe_desc[code], code, cid); 2170 2171 spin_lock_bh(&phba->async_pdu_lock); 2172 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2173 (struct i_t_dpdu_cqe *)sol); 2174 spin_unlock_bh(&phba->async_pdu_lock); 2175 break; 2176 case UNSOL_DATA_NOTIFY: 2177 beiscsi_log(phba, KERN_INFO, 2178 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2179 "BM_%d : Received %s[%d] on CID : %d\n", 2180 cqe_desc[code], code, cid); 2181 2182 spin_lock_bh(&phba->async_pdu_lock); 2183 hwi_process_default_pdu_ring(beiscsi_conn, phba, 2184 (struct i_t_dpdu_cqe *)sol); 2185 spin_unlock_bh(&phba->async_pdu_lock); 2186 break; 2187 case CXN_INVALIDATE_INDEX_NOTIFY: 2188 case CMD_INVALIDATED_NOTIFY: 2189 case CXN_INVALIDATE_NOTIFY: 2190 beiscsi_log(phba, KERN_ERR, 2191 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2192 "BM_%d : Ignoring %s[%d] on CID : %d\n", 2193 cqe_desc[code], code, cid); 2194 break; 2195 case SOL_CMD_KILLED_DATA_DIGEST_ERR: 2196 case CMD_KILLED_INVALID_STATSN_RCVD: 2197 case CMD_KILLED_INVALID_R2T_RCVD: 2198 case CMD_CXN_KILLED_LUN_INVALID: 2199 case CMD_CXN_KILLED_ICD_INVALID: 2200 case CMD_CXN_KILLED_ITT_INVALID: 2201 case CMD_CXN_KILLED_SEQ_OUTOFORDER: 2202 case CMD_CXN_KILLED_INVALID_DATASN_RCVD: 2203 beiscsi_log(phba, KERN_ERR, 2204 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2205 "BM_%d : Cmd Notification %s[%d] on CID : %d\n", 2206 cqe_desc[code], code, cid); 2207 break; 2208 case UNSOL_DATA_DIGEST_ERROR_NOTIFY: 2209 beiscsi_log(phba, KERN_ERR, 2210 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2211 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n", 2212 cqe_desc[code], code, cid); 2213 spin_lock_bh(&phba->async_pdu_lock); 2214 hwi_flush_default_pdu_buffer(phba, beiscsi_conn, 2215 (struct i_t_dpdu_cqe *) sol); 2216 spin_unlock_bh(&phba->async_pdu_lock); 2217 break; 2218 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL: 2219 case CXN_KILLED_BURST_LEN_MISMATCH: 2220 case CXN_KILLED_AHS_RCVD: 2221 case CXN_KILLED_HDR_DIGEST_ERR: 2222 case CXN_KILLED_UNKNOWN_HDR: 2223 case CXN_KILLED_STALE_ITT_TTT_RCVD: 2224 case CXN_KILLED_INVALID_ITT_TTT_RCVD: 2225 case CXN_KILLED_TIMED_OUT: 2226 case CXN_KILLED_FIN_RCVD: 2227 case CXN_KILLED_RST_SENT: 2228 case CXN_KILLED_RST_RCVD: 2229 case CXN_KILLED_BAD_UNSOL_PDU_RCVD: 2230 case CXN_KILLED_BAD_WRB_INDEX_ERROR: 2231 case CXN_KILLED_OVER_RUN_RESIDUAL: 2232 case CXN_KILLED_UNDER_RUN_RESIDUAL: 2233 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN: 2234 beiscsi_log(phba, KERN_ERR, 2235 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2236 "BM_%d : Event %s[%d] received on CID : %d\n", 2237 cqe_desc[code], code, cid); 2238 if (beiscsi_conn) 2239 iscsi_conn_failure(beiscsi_conn->conn, 2240 ISCSI_ERR_CONN_FAILED); 2241 break; 2242 default: 2243 beiscsi_log(phba, KERN_ERR, 2244 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 2245 "BM_%d : Invalid CQE Event Received Code : %d" 2246 "CID 0x%x...\n", 2247 code, cid); 2248 break; 2249 } 2250 2251 proc_next_cqe: 2252 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0); 2253 queue_tail_inc(cq); 2254 sol = queue_tail_node(cq); 2255 num_processed++; 2256 } 2257 2258 if (num_processed > 0) { 2259 tot_nump += num_processed; 2260 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0); 2261 } 2262 return tot_nump; 2263 } 2264 2265 void beiscsi_process_all_cqs(struct work_struct *work) 2266 { 2267 unsigned long flags; 2268 struct hwi_controller *phwi_ctrlr; 2269 struct hwi_context_memory *phwi_context; 2270 struct beiscsi_hba *phba; 2271 struct be_eq_obj *pbe_eq = 2272 container_of(work, struct be_eq_obj, work_cqs); 2273 2274 phba = pbe_eq->phba; 2275 phwi_ctrlr = phba->phwi_ctrlr; 2276 phwi_context = phwi_ctrlr->phwi_ctxt; 2277 2278 if (pbe_eq->todo_mcc_cq) { 2279 spin_lock_irqsave(&phba->isr_lock, flags); 2280 pbe_eq->todo_mcc_cq = false; 2281 spin_unlock_irqrestore(&phba->isr_lock, flags); 2282 beiscsi_process_mcc_isr(phba); 2283 } 2284 2285 if (pbe_eq->todo_cq) { 2286 spin_lock_irqsave(&phba->isr_lock, flags); 2287 pbe_eq->todo_cq = false; 2288 spin_unlock_irqrestore(&phba->isr_lock, flags); 2289 beiscsi_process_cq(pbe_eq); 2290 } 2291 2292 /* rearm EQ for further interrupts */ 2293 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2294 } 2295 2296 static int be_iopoll(struct blk_iopoll *iop, int budget) 2297 { 2298 unsigned int ret; 2299 struct beiscsi_hba *phba; 2300 struct be_eq_obj *pbe_eq; 2301 2302 pbe_eq = container_of(iop, struct be_eq_obj, iopoll); 2303 ret = beiscsi_process_cq(pbe_eq); 2304 pbe_eq->cq_count += ret; 2305 if (ret < budget) { 2306 phba = pbe_eq->phba; 2307 blk_iopoll_complete(iop); 2308 beiscsi_log(phba, KERN_INFO, 2309 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO, 2310 "BM_%d : rearm pbe_eq->q.id =%d\n", 2311 pbe_eq->q.id); 2312 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); 2313 } 2314 return ret; 2315 } 2316 2317 static void 2318 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2319 unsigned int num_sg, struct beiscsi_io_task *io_task) 2320 { 2321 struct iscsi_sge *psgl; 2322 unsigned int sg_len, index; 2323 unsigned int sge_len = 0; 2324 unsigned long long addr; 2325 struct scatterlist *l_sg; 2326 unsigned int offset; 2327 2328 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb, 2329 io_task->bhs_pa.u.a32.address_lo); 2330 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb, 2331 io_task->bhs_pa.u.a32.address_hi); 2332 2333 l_sg = sg; 2334 for (index = 0; (index < num_sg) && (index < 2); index++, 2335 sg = sg_next(sg)) { 2336 if (index == 0) { 2337 sg_len = sg_dma_len(sg); 2338 addr = (u64) sg_dma_address(sg); 2339 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2340 sge0_addr_lo, pwrb, 2341 lower_32_bits(addr)); 2342 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2343 sge0_addr_hi, pwrb, 2344 upper_32_bits(addr)); 2345 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2346 sge0_len, pwrb, 2347 sg_len); 2348 sge_len = sg_len; 2349 } else { 2350 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset, 2351 pwrb, sge_len); 2352 sg_len = sg_dma_len(sg); 2353 addr = (u64) sg_dma_address(sg); 2354 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2355 sge1_addr_lo, pwrb, 2356 lower_32_bits(addr)); 2357 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2358 sge1_addr_hi, pwrb, 2359 upper_32_bits(addr)); 2360 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 2361 sge1_len, pwrb, 2362 sg_len); 2363 } 2364 } 2365 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2366 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2367 2368 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2369 2370 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2371 io_task->bhs_pa.u.a32.address_hi); 2372 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2373 io_task->bhs_pa.u.a32.address_lo); 2374 2375 if (num_sg == 1) { 2376 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2377 1); 2378 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2379 0); 2380 } else if (num_sg == 2) { 2381 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2382 0); 2383 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2384 1); 2385 } else { 2386 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb, 2387 0); 2388 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb, 2389 0); 2390 } 2391 2392 sg = l_sg; 2393 psgl++; 2394 psgl++; 2395 offset = 0; 2396 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2397 sg_len = sg_dma_len(sg); 2398 addr = (u64) sg_dma_address(sg); 2399 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2400 lower_32_bits(addr)); 2401 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2402 upper_32_bits(addr)); 2403 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2404 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2405 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2406 offset += sg_len; 2407 } 2408 psgl--; 2409 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2410 } 2411 2412 static void 2413 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg, 2414 unsigned int num_sg, struct beiscsi_io_task *io_task) 2415 { 2416 struct iscsi_sge *psgl; 2417 unsigned int sg_len, index; 2418 unsigned int sge_len = 0; 2419 unsigned long long addr; 2420 struct scatterlist *l_sg; 2421 unsigned int offset; 2422 2423 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2424 io_task->bhs_pa.u.a32.address_lo); 2425 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2426 io_task->bhs_pa.u.a32.address_hi); 2427 2428 l_sg = sg; 2429 for (index = 0; (index < num_sg) && (index < 2); index++, 2430 sg = sg_next(sg)) { 2431 if (index == 0) { 2432 sg_len = sg_dma_len(sg); 2433 addr = (u64) sg_dma_address(sg); 2434 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2435 ((u32)(addr & 0xFFFFFFFF))); 2436 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2437 ((u32)(addr >> 32))); 2438 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2439 sg_len); 2440 sge_len = sg_len; 2441 } else { 2442 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset, 2443 pwrb, sge_len); 2444 sg_len = sg_dma_len(sg); 2445 addr = (u64) sg_dma_address(sg); 2446 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb, 2447 ((u32)(addr & 0xFFFFFFFF))); 2448 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb, 2449 ((u32)(addr >> 32))); 2450 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb, 2451 sg_len); 2452 } 2453 } 2454 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2455 memset(psgl, 0, sizeof(*psgl) * BE2_SGE); 2456 2457 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2); 2458 2459 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2460 io_task->bhs_pa.u.a32.address_hi); 2461 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2462 io_task->bhs_pa.u.a32.address_lo); 2463 2464 if (num_sg == 1) { 2465 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2466 1); 2467 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2468 0); 2469 } else if (num_sg == 2) { 2470 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2471 0); 2472 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2473 1); 2474 } else { 2475 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 2476 0); 2477 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 2478 0); 2479 } 2480 sg = l_sg; 2481 psgl++; 2482 psgl++; 2483 offset = 0; 2484 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) { 2485 sg_len = sg_dma_len(sg); 2486 addr = (u64) sg_dma_address(sg); 2487 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2488 (addr & 0xFFFFFFFF)); 2489 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2490 (addr >> 32)); 2491 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len); 2492 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset); 2493 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2494 offset += sg_len; 2495 } 2496 psgl--; 2497 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2498 } 2499 2500 /** 2501 * hwi_write_buffer()- Populate the WRB with task info 2502 * @pwrb: ptr to the WRB entry 2503 * @task: iscsi task which is to be executed 2504 **/ 2505 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) 2506 { 2507 struct iscsi_sge *psgl; 2508 struct beiscsi_io_task *io_task = task->dd_data; 2509 struct beiscsi_conn *beiscsi_conn = io_task->conn; 2510 struct beiscsi_hba *phba = beiscsi_conn->phba; 2511 uint8_t dsp_value = 0; 2512 2513 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2; 2514 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb, 2515 io_task->bhs_pa.u.a32.address_lo); 2516 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb, 2517 io_task->bhs_pa.u.a32.address_hi); 2518 2519 if (task->data) { 2520 2521 /* Check for the data_count */ 2522 dsp_value = (task->data_count) ? 1 : 0; 2523 2524 if (is_chip_be2_be3r(phba)) 2525 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, 2526 pwrb, dsp_value); 2527 else 2528 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, 2529 pwrb, dsp_value); 2530 2531 /* Map addr only if there is data_count */ 2532 if (dsp_value) { 2533 io_task->mtask_addr = pci_map_single(phba->pcidev, 2534 task->data, 2535 task->data_count, 2536 PCI_DMA_TODEVICE); 2537 io_task->mtask_data_count = task->data_count; 2538 } else 2539 io_task->mtask_addr = 0; 2540 2541 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb, 2542 lower_32_bits(io_task->mtask_addr)); 2543 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb, 2544 upper_32_bits(io_task->mtask_addr)); 2545 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb, 2546 task->data_count); 2547 2548 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1); 2549 } else { 2550 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 2551 io_task->mtask_addr = 0; 2552 } 2553 2554 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag; 2555 2556 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len); 2557 2558 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2559 io_task->bhs_pa.u.a32.address_hi); 2560 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2561 io_task->bhs_pa.u.a32.address_lo); 2562 if (task->data) { 2563 psgl++; 2564 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0); 2565 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0); 2566 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0); 2567 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0); 2568 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0); 2569 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0); 2570 2571 psgl++; 2572 if (task->data) { 2573 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 2574 lower_32_bits(io_task->mtask_addr)); 2575 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 2576 upper_32_bits(io_task->mtask_addr)); 2577 } 2578 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106); 2579 } 2580 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1); 2581 } 2582 2583 /** 2584 * beiscsi_find_mem_req()- Find mem needed 2585 * @phba: ptr to HBA struct 2586 **/ 2587 static void beiscsi_find_mem_req(struct beiscsi_hba *phba) 2588 { 2589 uint8_t mem_descr_index, ulp_num; 2590 unsigned int num_cq_pages, num_async_pdu_buf_pages; 2591 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn; 2592 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages; 2593 2594 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 2595 sizeof(struct sol_cqe)); 2596 2597 phba->params.hwi_ws_sz = sizeof(struct hwi_controller); 2598 2599 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 * 2600 BE_ISCSI_PDU_HEADER_SIZE; 2601 phba->mem_req[HWI_MEM_ADDN_CONTEXT] = 2602 sizeof(struct hwi_context_memory); 2603 2604 2605 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb) 2606 * (phba->params.wrbs_per_cxn) 2607 * phba->params.cxns_per_ctrl; 2608 wrb_sz_per_cxn = sizeof(struct wrb_handle) * 2609 (phba->params.wrbs_per_cxn); 2610 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) * 2611 phba->params.cxns_per_ctrl); 2612 2613 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) * 2614 phba->params.icds_per_ctrl; 2615 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) * 2616 phba->params.num_sge_per_io * phba->params.icds_per_ctrl; 2617 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2618 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2619 2620 num_async_pdu_buf_sgl_pages = 2621 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2622 phba, ulp_num) * 2623 sizeof(struct phys_addr)); 2624 2625 num_async_pdu_buf_pages = 2626 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2627 phba, ulp_num) * 2628 phba->params.defpdu_hdr_sz); 2629 2630 num_async_pdu_data_pages = 2631 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2632 phba, ulp_num) * 2633 phba->params.defpdu_data_sz); 2634 2635 num_async_pdu_data_sgl_pages = 2636 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT( 2637 phba, ulp_num) * 2638 sizeof(struct phys_addr)); 2639 2640 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 + 2641 (ulp_num * MEM_DESCR_OFFSET)); 2642 phba->mem_req[mem_descr_index] = 2643 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2644 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE; 2645 2646 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2647 (ulp_num * MEM_DESCR_OFFSET)); 2648 phba->mem_req[mem_descr_index] = 2649 num_async_pdu_buf_pages * 2650 PAGE_SIZE; 2651 2652 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 + 2653 (ulp_num * MEM_DESCR_OFFSET)); 2654 phba->mem_req[mem_descr_index] = 2655 num_async_pdu_data_pages * 2656 PAGE_SIZE; 2657 2658 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2659 (ulp_num * MEM_DESCR_OFFSET)); 2660 phba->mem_req[mem_descr_index] = 2661 num_async_pdu_buf_sgl_pages * 2662 PAGE_SIZE; 2663 2664 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 + 2665 (ulp_num * MEM_DESCR_OFFSET)); 2666 phba->mem_req[mem_descr_index] = 2667 num_async_pdu_data_sgl_pages * 2668 PAGE_SIZE; 2669 2670 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 2671 (ulp_num * MEM_DESCR_OFFSET)); 2672 phba->mem_req[mem_descr_index] = 2673 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2674 sizeof(struct async_pdu_handle); 2675 2676 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 2677 (ulp_num * MEM_DESCR_OFFSET)); 2678 phba->mem_req[mem_descr_index] = 2679 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2680 sizeof(struct async_pdu_handle); 2681 2682 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2683 (ulp_num * MEM_DESCR_OFFSET)); 2684 phba->mem_req[mem_descr_index] = 2685 sizeof(struct hwi_async_pdu_context) + 2686 (BEISCSI_GET_CID_COUNT(phba, ulp_num) * 2687 sizeof(struct hwi_async_entry)); 2688 } 2689 } 2690 } 2691 2692 static int beiscsi_alloc_mem(struct beiscsi_hba *phba) 2693 { 2694 dma_addr_t bus_add; 2695 struct hwi_controller *phwi_ctrlr; 2696 struct be_mem_descriptor *mem_descr; 2697 struct mem_array *mem_arr, *mem_arr_orig; 2698 unsigned int i, j, alloc_size, curr_alloc_size; 2699 2700 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL); 2701 if (!phba->phwi_ctrlr) 2702 return -ENOMEM; 2703 2704 /* Allocate memory for wrb_context */ 2705 phwi_ctrlr = phba->phwi_ctrlr; 2706 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * 2707 phba->params.cxns_per_ctrl, 2708 GFP_KERNEL); 2709 if (!phwi_ctrlr->wrb_context) 2710 return -ENOMEM; 2711 2712 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), 2713 GFP_KERNEL); 2714 if (!phba->init_mem) { 2715 kfree(phwi_ctrlr->wrb_context); 2716 kfree(phba->phwi_ctrlr); 2717 return -ENOMEM; 2718 } 2719 2720 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT, 2721 GFP_KERNEL); 2722 if (!mem_arr_orig) { 2723 kfree(phba->init_mem); 2724 kfree(phwi_ctrlr->wrb_context); 2725 kfree(phba->phwi_ctrlr); 2726 return -ENOMEM; 2727 } 2728 2729 mem_descr = phba->init_mem; 2730 for (i = 0; i < SE_MEM_MAX; i++) { 2731 if (!phba->mem_req[i]) { 2732 mem_descr->mem_array = NULL; 2733 mem_descr++; 2734 continue; 2735 } 2736 2737 j = 0; 2738 mem_arr = mem_arr_orig; 2739 alloc_size = phba->mem_req[i]; 2740 memset(mem_arr, 0, sizeof(struct mem_array) * 2741 BEISCSI_MAX_FRAGS_INIT); 2742 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size); 2743 do { 2744 mem_arr->virtual_address = pci_alloc_consistent( 2745 phba->pcidev, 2746 curr_alloc_size, 2747 &bus_add); 2748 if (!mem_arr->virtual_address) { 2749 if (curr_alloc_size <= BE_MIN_MEM_SIZE) 2750 goto free_mem; 2751 if (curr_alloc_size - 2752 rounddown_pow_of_two(curr_alloc_size)) 2753 curr_alloc_size = rounddown_pow_of_two 2754 (curr_alloc_size); 2755 else 2756 curr_alloc_size = curr_alloc_size / 2; 2757 } else { 2758 mem_arr->bus_address.u. 2759 a64.address = (__u64) bus_add; 2760 mem_arr->size = curr_alloc_size; 2761 alloc_size -= curr_alloc_size; 2762 curr_alloc_size = min(be_max_phys_size * 2763 1024, alloc_size); 2764 j++; 2765 mem_arr++; 2766 } 2767 } while (alloc_size); 2768 mem_descr->num_elements = j; 2769 mem_descr->size_in_bytes = phba->mem_req[i]; 2770 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j, 2771 GFP_KERNEL); 2772 if (!mem_descr->mem_array) 2773 goto free_mem; 2774 2775 memcpy(mem_descr->mem_array, mem_arr_orig, 2776 sizeof(struct mem_array) * j); 2777 mem_descr++; 2778 } 2779 kfree(mem_arr_orig); 2780 return 0; 2781 free_mem: 2782 mem_descr->num_elements = j; 2783 while ((i) || (j)) { 2784 for (j = mem_descr->num_elements; j > 0; j--) { 2785 pci_free_consistent(phba->pcidev, 2786 mem_descr->mem_array[j - 1].size, 2787 mem_descr->mem_array[j - 1]. 2788 virtual_address, 2789 (unsigned long)mem_descr-> 2790 mem_array[j - 1]. 2791 bus_address.u.a64.address); 2792 } 2793 if (i) { 2794 i--; 2795 kfree(mem_descr->mem_array); 2796 mem_descr--; 2797 } 2798 } 2799 kfree(mem_arr_orig); 2800 kfree(phba->init_mem); 2801 kfree(phba->phwi_ctrlr->wrb_context); 2802 kfree(phba->phwi_ctrlr); 2803 return -ENOMEM; 2804 } 2805 2806 static int beiscsi_get_memory(struct beiscsi_hba *phba) 2807 { 2808 beiscsi_find_mem_req(phba); 2809 return beiscsi_alloc_mem(phba); 2810 } 2811 2812 static void iscsi_init_global_templates(struct beiscsi_hba *phba) 2813 { 2814 struct pdu_data_out *pdata_out; 2815 struct pdu_nop_out *pnop_out; 2816 struct be_mem_descriptor *mem_descr; 2817 2818 mem_descr = phba->init_mem; 2819 mem_descr += ISCSI_MEM_GLOBAL_HEADER; 2820 pdata_out = 2821 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address; 2822 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2823 2824 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out, 2825 IIOC_SCSI_DATA); 2826 2827 pnop_out = 2828 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0]. 2829 virtual_address + BE_ISCSI_PDU_HEADER_SIZE); 2830 2831 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE); 2832 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF); 2833 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1); 2834 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0); 2835 } 2836 2837 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) 2838 { 2839 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; 2840 struct hwi_context_memory *phwi_ctxt; 2841 struct wrb_handle *pwrb_handle = NULL; 2842 struct hwi_controller *phwi_ctrlr; 2843 struct hwi_wrb_context *pwrb_context; 2844 struct iscsi_wrb *pwrb = NULL; 2845 unsigned int num_cxn_wrbh = 0; 2846 unsigned int num_cxn_wrb = 0, j, idx = 0, index; 2847 2848 mem_descr_wrbh = phba->init_mem; 2849 mem_descr_wrbh += HWI_MEM_WRBH; 2850 2851 mem_descr_wrb = phba->init_mem; 2852 mem_descr_wrb += HWI_MEM_WRB; 2853 phwi_ctrlr = phba->phwi_ctrlr; 2854 2855 /* Allocate memory for WRBQ */ 2856 phwi_ctxt = phwi_ctrlr->phwi_ctxt; 2857 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * 2858 phba->params.cxns_per_ctrl, 2859 GFP_KERNEL); 2860 if (!phwi_ctxt->be_wrbq) { 2861 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2862 "BM_%d : WRBQ Mem Alloc Failed\n"); 2863 return -ENOMEM; 2864 } 2865 2866 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2867 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2868 pwrb_context->pwrb_handle_base = 2869 kzalloc(sizeof(struct wrb_handle *) * 2870 phba->params.wrbs_per_cxn, GFP_KERNEL); 2871 if (!pwrb_context->pwrb_handle_base) { 2872 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2873 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2874 goto init_wrb_hndl_failed; 2875 } 2876 pwrb_context->pwrb_handle_basestd = 2877 kzalloc(sizeof(struct wrb_handle *) * 2878 phba->params.wrbs_per_cxn, GFP_KERNEL); 2879 if (!pwrb_context->pwrb_handle_basestd) { 2880 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 2881 "BM_%d : Mem Alloc Failed. Failing to load\n"); 2882 goto init_wrb_hndl_failed; 2883 } 2884 if (!num_cxn_wrbh) { 2885 pwrb_handle = 2886 mem_descr_wrbh->mem_array[idx].virtual_address; 2887 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) / 2888 ((sizeof(struct wrb_handle)) * 2889 phba->params.wrbs_per_cxn)); 2890 idx++; 2891 } 2892 pwrb_context->alloc_index = 0; 2893 pwrb_context->wrb_handles_available = 0; 2894 pwrb_context->free_index = 0; 2895 2896 if (num_cxn_wrbh) { 2897 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2898 pwrb_context->pwrb_handle_base[j] = pwrb_handle; 2899 pwrb_context->pwrb_handle_basestd[j] = 2900 pwrb_handle; 2901 pwrb_context->wrb_handles_available++; 2902 pwrb_handle->wrb_index = j; 2903 pwrb_handle++; 2904 } 2905 num_cxn_wrbh--; 2906 } 2907 } 2908 idx = 0; 2909 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 2910 pwrb_context = &phwi_ctrlr->wrb_context[index]; 2911 if (!num_cxn_wrb) { 2912 pwrb = mem_descr_wrb->mem_array[idx].virtual_address; 2913 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) / 2914 ((sizeof(struct iscsi_wrb) * 2915 phba->params.wrbs_per_cxn)); 2916 idx++; 2917 } 2918 2919 if (num_cxn_wrb) { 2920 for (j = 0; j < phba->params.wrbs_per_cxn; j++) { 2921 pwrb_handle = pwrb_context->pwrb_handle_base[j]; 2922 pwrb_handle->pwrb = pwrb; 2923 pwrb++; 2924 } 2925 num_cxn_wrb--; 2926 } 2927 } 2928 return 0; 2929 init_wrb_hndl_failed: 2930 for (j = index; j > 0; j--) { 2931 pwrb_context = &phwi_ctrlr->wrb_context[j]; 2932 kfree(pwrb_context->pwrb_handle_base); 2933 kfree(pwrb_context->pwrb_handle_basestd); 2934 } 2935 return -ENOMEM; 2936 } 2937 2938 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) 2939 { 2940 uint8_t ulp_num; 2941 struct hwi_controller *phwi_ctrlr; 2942 struct hba_parameters *p = &phba->params; 2943 struct hwi_async_pdu_context *pasync_ctx; 2944 struct async_pdu_handle *pasync_header_h, *pasync_data_h; 2945 unsigned int index, idx, num_per_mem, num_async_data; 2946 struct be_mem_descriptor *mem_descr; 2947 2948 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 2949 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 2950 2951 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2952 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 + 2953 (ulp_num * MEM_DESCR_OFFSET)); 2954 2955 phwi_ctrlr = phba->phwi_ctrlr; 2956 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] = 2957 (struct hwi_async_pdu_context *) 2958 mem_descr->mem_array[0].virtual_address; 2959 2960 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 2961 memset(pasync_ctx, 0, sizeof(*pasync_ctx)); 2962 2963 pasync_ctx->async_entry = 2964 (struct hwi_async_entry *) 2965 ((long unsigned int)pasync_ctx + 2966 sizeof(struct hwi_async_pdu_context)); 2967 2968 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba, 2969 ulp_num); 2970 pasync_ctx->buffer_size = p->defpdu_hdr_sz; 2971 2972 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2973 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 + 2974 (ulp_num * MEM_DESCR_OFFSET); 2975 if (mem_descr->mem_array[0].virtual_address) { 2976 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 2977 "BM_%d : hwi_init_async_pdu_ctx" 2978 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n", 2979 ulp_num, 2980 mem_descr->mem_array[0]. 2981 virtual_address); 2982 } else 2983 beiscsi_log(phba, KERN_WARNING, 2984 BEISCSI_LOG_INIT, 2985 "BM_%d : No Virtual address for ULP : %d\n", 2986 ulp_num); 2987 2988 pasync_ctx->async_header.va_base = 2989 mem_descr->mem_array[0].virtual_address; 2990 2991 pasync_ctx->async_header.pa_base.u.a64.address = 2992 mem_descr->mem_array[0]. 2993 bus_address.u.a64.address; 2994 2995 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 2996 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 2997 (ulp_num * MEM_DESCR_OFFSET); 2998 if (mem_descr->mem_array[0].virtual_address) { 2999 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3000 "BM_%d : hwi_init_async_pdu_ctx" 3001 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n", 3002 ulp_num, 3003 mem_descr->mem_array[0]. 3004 virtual_address); 3005 } else 3006 beiscsi_log(phba, KERN_WARNING, 3007 BEISCSI_LOG_INIT, 3008 "BM_%d : No Virtual address for ULP : %d\n", 3009 ulp_num); 3010 3011 pasync_ctx->async_header.ring_base = 3012 mem_descr->mem_array[0].virtual_address; 3013 3014 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3015 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 + 3016 (ulp_num * MEM_DESCR_OFFSET); 3017 if (mem_descr->mem_array[0].virtual_address) { 3018 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3019 "BM_%d : hwi_init_async_pdu_ctx" 3020 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n", 3021 ulp_num, 3022 mem_descr->mem_array[0]. 3023 virtual_address); 3024 } else 3025 beiscsi_log(phba, KERN_WARNING, 3026 BEISCSI_LOG_INIT, 3027 "BM_%d : No Virtual address for ULP : %d\n", 3028 ulp_num); 3029 3030 pasync_ctx->async_header.handle_base = 3031 mem_descr->mem_array[0].virtual_address; 3032 pasync_ctx->async_header.writables = 0; 3033 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list); 3034 3035 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3036 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3037 (ulp_num * MEM_DESCR_OFFSET); 3038 if (mem_descr->mem_array[0].virtual_address) { 3039 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3040 "BM_%d : hwi_init_async_pdu_ctx" 3041 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n", 3042 ulp_num, 3043 mem_descr->mem_array[0]. 3044 virtual_address); 3045 } else 3046 beiscsi_log(phba, KERN_WARNING, 3047 BEISCSI_LOG_INIT, 3048 "BM_%d : No Virtual address for ULP : %d\n", 3049 ulp_num); 3050 3051 pasync_ctx->async_data.ring_base = 3052 mem_descr->mem_array[0].virtual_address; 3053 3054 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3055 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 + 3056 (ulp_num * MEM_DESCR_OFFSET); 3057 if (!mem_descr->mem_array[0].virtual_address) 3058 beiscsi_log(phba, KERN_WARNING, 3059 BEISCSI_LOG_INIT, 3060 "BM_%d : No Virtual address for ULP : %d\n", 3061 ulp_num); 3062 3063 pasync_ctx->async_data.handle_base = 3064 mem_descr->mem_array[0].virtual_address; 3065 pasync_ctx->async_data.writables = 0; 3066 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list); 3067 3068 pasync_header_h = 3069 (struct async_pdu_handle *) 3070 pasync_ctx->async_header.handle_base; 3071 pasync_data_h = 3072 (struct async_pdu_handle *) 3073 pasync_ctx->async_data.handle_base; 3074 3075 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3076 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 + 3077 (ulp_num * MEM_DESCR_OFFSET); 3078 if (mem_descr->mem_array[0].virtual_address) { 3079 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3080 "BM_%d : hwi_init_async_pdu_ctx" 3081 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n", 3082 ulp_num, 3083 mem_descr->mem_array[0]. 3084 virtual_address); 3085 } else 3086 beiscsi_log(phba, KERN_WARNING, 3087 BEISCSI_LOG_INIT, 3088 "BM_%d : No Virtual address for ULP : %d\n", 3089 ulp_num); 3090 3091 idx = 0; 3092 pasync_ctx->async_data.va_base = 3093 mem_descr->mem_array[idx].virtual_address; 3094 pasync_ctx->async_data.pa_base.u.a64.address = 3095 mem_descr->mem_array[idx]. 3096 bus_address.u.a64.address; 3097 3098 num_async_data = ((mem_descr->mem_array[idx].size) / 3099 phba->params.defpdu_data_sz); 3100 num_per_mem = 0; 3101 3102 for (index = 0; index < BEISCSI_GET_CID_COUNT 3103 (phba, ulp_num); index++) { 3104 pasync_header_h->cri = -1; 3105 pasync_header_h->index = (char)index; 3106 INIT_LIST_HEAD(&pasync_header_h->link); 3107 pasync_header_h->pbuffer = 3108 (void *)((unsigned long) 3109 (pasync_ctx-> 3110 async_header.va_base) + 3111 (p->defpdu_hdr_sz * index)); 3112 3113 pasync_header_h->pa.u.a64.address = 3114 pasync_ctx->async_header.pa_base.u.a64. 3115 address + (p->defpdu_hdr_sz * index); 3116 3117 list_add_tail(&pasync_header_h->link, 3118 &pasync_ctx->async_header. 3119 free_list); 3120 pasync_header_h++; 3121 pasync_ctx->async_header.free_entries++; 3122 pasync_ctx->async_header.writables++; 3123 3124 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 3125 wait_queue.list); 3126 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 3127 header_busy_list); 3128 pasync_data_h->cri = -1; 3129 pasync_data_h->index = (char)index; 3130 INIT_LIST_HEAD(&pasync_data_h->link); 3131 3132 if (!num_async_data) { 3133 num_per_mem = 0; 3134 idx++; 3135 pasync_ctx->async_data.va_base = 3136 mem_descr->mem_array[idx]. 3137 virtual_address; 3138 pasync_ctx->async_data.pa_base.u. 3139 a64.address = 3140 mem_descr->mem_array[idx]. 3141 bus_address.u.a64.address; 3142 num_async_data = 3143 ((mem_descr->mem_array[idx]. 3144 size) / 3145 phba->params.defpdu_data_sz); 3146 } 3147 pasync_data_h->pbuffer = 3148 (void *)((unsigned long) 3149 (pasync_ctx->async_data.va_base) + 3150 (p->defpdu_data_sz * num_per_mem)); 3151 3152 pasync_data_h->pa.u.a64.address = 3153 pasync_ctx->async_data.pa_base.u.a64. 3154 address + (p->defpdu_data_sz * 3155 num_per_mem); 3156 num_per_mem++; 3157 num_async_data--; 3158 3159 list_add_tail(&pasync_data_h->link, 3160 &pasync_ctx->async_data. 3161 free_list); 3162 pasync_data_h++; 3163 pasync_ctx->async_data.free_entries++; 3164 pasync_ctx->async_data.writables++; 3165 3166 INIT_LIST_HEAD(&pasync_ctx->async_entry[index]. 3167 data_busy_list); 3168 } 3169 3170 pasync_ctx->async_header.host_write_ptr = 0; 3171 pasync_ctx->async_header.ep_read_ptr = -1; 3172 pasync_ctx->async_data.host_write_ptr = 0; 3173 pasync_ctx->async_data.ep_read_ptr = -1; 3174 } 3175 } 3176 3177 return 0; 3178 } 3179 3180 static int 3181 be_sgl_create_contiguous(void *virtual_address, 3182 u64 physical_address, u32 length, 3183 struct be_dma_mem *sgl) 3184 { 3185 WARN_ON(!virtual_address); 3186 WARN_ON(!physical_address); 3187 WARN_ON(!length > 0); 3188 WARN_ON(!sgl); 3189 3190 sgl->va = virtual_address; 3191 sgl->dma = (unsigned long)physical_address; 3192 sgl->size = length; 3193 3194 return 0; 3195 } 3196 3197 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl) 3198 { 3199 memset(sgl, 0, sizeof(*sgl)); 3200 } 3201 3202 static void 3203 hwi_build_be_sgl_arr(struct beiscsi_hba *phba, 3204 struct mem_array *pmem, struct be_dma_mem *sgl) 3205 { 3206 if (sgl->va) 3207 be_sgl_destroy_contiguous(sgl); 3208 3209 be_sgl_create_contiguous(pmem->virtual_address, 3210 pmem->bus_address.u.a64.address, 3211 pmem->size, sgl); 3212 } 3213 3214 static void 3215 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba, 3216 struct mem_array *pmem, struct be_dma_mem *sgl) 3217 { 3218 if (sgl->va) 3219 be_sgl_destroy_contiguous(sgl); 3220 3221 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address, 3222 pmem->bus_address.u.a64.address, 3223 pmem->size, sgl); 3224 } 3225 3226 static int be_fill_queue(struct be_queue_info *q, 3227 u16 len, u16 entry_size, void *vaddress) 3228 { 3229 struct be_dma_mem *mem = &q->dma_mem; 3230 3231 memset(q, 0, sizeof(*q)); 3232 q->len = len; 3233 q->entry_size = entry_size; 3234 mem->size = len * entry_size; 3235 mem->va = vaddress; 3236 if (!mem->va) 3237 return -ENOMEM; 3238 memset(mem->va, 0, mem->size); 3239 return 0; 3240 } 3241 3242 static int beiscsi_create_eqs(struct beiscsi_hba *phba, 3243 struct hwi_context_memory *phwi_context) 3244 { 3245 unsigned int i, num_eq_pages; 3246 int ret = 0, eq_for_mcc; 3247 struct be_queue_info *eq; 3248 struct be_dma_mem *mem; 3249 void *eq_vaddress; 3250 dma_addr_t paddr; 3251 3252 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \ 3253 sizeof(struct be_eq_entry)); 3254 3255 if (phba->msix_enabled) 3256 eq_for_mcc = 1; 3257 else 3258 eq_for_mcc = 0; 3259 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3260 eq = &phwi_context->be_eq[i].q; 3261 mem = &eq->dma_mem; 3262 phwi_context->be_eq[i].phba = phba; 3263 eq_vaddress = pci_alloc_consistent(phba->pcidev, 3264 num_eq_pages * PAGE_SIZE, 3265 &paddr); 3266 if (!eq_vaddress) 3267 goto create_eq_error; 3268 3269 mem->va = eq_vaddress; 3270 ret = be_fill_queue(eq, phba->params.num_eq_entries, 3271 sizeof(struct be_eq_entry), eq_vaddress); 3272 if (ret) { 3273 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3274 "BM_%d : be_fill_queue Failed for EQ\n"); 3275 goto create_eq_error; 3276 } 3277 3278 mem->dma = paddr; 3279 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq, 3280 phwi_context->cur_eqd); 3281 if (ret) { 3282 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3283 "BM_%d : beiscsi_cmd_eq_create" 3284 "Failed for EQ\n"); 3285 goto create_eq_error; 3286 } 3287 3288 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3289 "BM_%d : eqid = %d\n", 3290 phwi_context->be_eq[i].q.id); 3291 } 3292 return 0; 3293 create_eq_error: 3294 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3295 eq = &phwi_context->be_eq[i].q; 3296 mem = &eq->dma_mem; 3297 if (mem->va) 3298 pci_free_consistent(phba->pcidev, num_eq_pages 3299 * PAGE_SIZE, 3300 mem->va, mem->dma); 3301 } 3302 return ret; 3303 } 3304 3305 static int beiscsi_create_cqs(struct beiscsi_hba *phba, 3306 struct hwi_context_memory *phwi_context) 3307 { 3308 unsigned int i, num_cq_pages; 3309 int ret = 0; 3310 struct be_queue_info *cq, *eq; 3311 struct be_dma_mem *mem; 3312 struct be_eq_obj *pbe_eq; 3313 void *cq_vaddress; 3314 dma_addr_t paddr; 3315 3316 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \ 3317 sizeof(struct sol_cqe)); 3318 3319 for (i = 0; i < phba->num_cpus; i++) { 3320 cq = &phwi_context->be_cq[i]; 3321 eq = &phwi_context->be_eq[i].q; 3322 pbe_eq = &phwi_context->be_eq[i]; 3323 pbe_eq->cq = cq; 3324 pbe_eq->phba = phba; 3325 mem = &cq->dma_mem; 3326 cq_vaddress = pci_alloc_consistent(phba->pcidev, 3327 num_cq_pages * PAGE_SIZE, 3328 &paddr); 3329 if (!cq_vaddress) 3330 goto create_cq_error; 3331 ret = be_fill_queue(cq, phba->params.num_cq_entries, 3332 sizeof(struct sol_cqe), cq_vaddress); 3333 if (ret) { 3334 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3335 "BM_%d : be_fill_queue Failed " 3336 "for ISCSI CQ\n"); 3337 goto create_cq_error; 3338 } 3339 3340 mem->dma = paddr; 3341 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false, 3342 false, 0); 3343 if (ret) { 3344 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3345 "BM_%d : beiscsi_cmd_eq_create" 3346 "Failed for ISCSI CQ\n"); 3347 goto create_cq_error; 3348 } 3349 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3350 "BM_%d : iscsi cq_id is %d for eq_id %d\n" 3351 "iSCSI CQ CREATED\n", cq->id, eq->id); 3352 } 3353 return 0; 3354 3355 create_cq_error: 3356 for (i = 0; i < phba->num_cpus; i++) { 3357 cq = &phwi_context->be_cq[i]; 3358 mem = &cq->dma_mem; 3359 if (mem->va) 3360 pci_free_consistent(phba->pcidev, num_cq_pages 3361 * PAGE_SIZE, 3362 mem->va, mem->dma); 3363 } 3364 return ret; 3365 3366 } 3367 3368 static int 3369 beiscsi_create_def_hdr(struct beiscsi_hba *phba, 3370 struct hwi_context_memory *phwi_context, 3371 struct hwi_controller *phwi_ctrlr, 3372 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3373 { 3374 unsigned int idx; 3375 int ret; 3376 struct be_queue_info *dq, *cq; 3377 struct be_dma_mem *mem; 3378 struct be_mem_descriptor *mem_descr; 3379 void *dq_vaddress; 3380 3381 idx = 0; 3382 dq = &phwi_context->be_def_hdrq[ulp_num]; 3383 cq = &phwi_context->be_cq[0]; 3384 mem = &dq->dma_mem; 3385 mem_descr = phba->init_mem; 3386 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 + 3387 (ulp_num * MEM_DESCR_OFFSET); 3388 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3389 ret = be_fill_queue(dq, mem_descr->mem_array[0].size / 3390 sizeof(struct phys_addr), 3391 sizeof(struct phys_addr), dq_vaddress); 3392 if (ret) { 3393 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3394 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n", 3395 ulp_num); 3396 3397 return ret; 3398 } 3399 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3400 bus_address.u.a64.address; 3401 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq, 3402 def_pdu_ring_sz, 3403 phba->params.defpdu_hdr_sz, 3404 BEISCSI_DEFQ_HDR, ulp_num); 3405 if (ret) { 3406 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3407 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n", 3408 ulp_num); 3409 3410 return ret; 3411 } 3412 3413 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3414 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n", 3415 ulp_num, 3416 phwi_context->be_def_hdrq[ulp_num].id); 3417 hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num); 3418 return 0; 3419 } 3420 3421 static int 3422 beiscsi_create_def_data(struct beiscsi_hba *phba, 3423 struct hwi_context_memory *phwi_context, 3424 struct hwi_controller *phwi_ctrlr, 3425 unsigned int def_pdu_ring_sz, uint8_t ulp_num) 3426 { 3427 unsigned int idx; 3428 int ret; 3429 struct be_queue_info *dataq, *cq; 3430 struct be_dma_mem *mem; 3431 struct be_mem_descriptor *mem_descr; 3432 void *dq_vaddress; 3433 3434 idx = 0; 3435 dataq = &phwi_context->be_def_dataq[ulp_num]; 3436 cq = &phwi_context->be_cq[0]; 3437 mem = &dataq->dma_mem; 3438 mem_descr = phba->init_mem; 3439 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 + 3440 (ulp_num * MEM_DESCR_OFFSET); 3441 dq_vaddress = mem_descr->mem_array[idx].virtual_address; 3442 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size / 3443 sizeof(struct phys_addr), 3444 sizeof(struct phys_addr), dq_vaddress); 3445 if (ret) { 3446 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3447 "BM_%d : be_fill_queue Failed for DEF PDU " 3448 "DATA on ULP : %d\n", 3449 ulp_num); 3450 3451 return ret; 3452 } 3453 mem->dma = (unsigned long)mem_descr->mem_array[idx]. 3454 bus_address.u.a64.address; 3455 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq, 3456 def_pdu_ring_sz, 3457 phba->params.defpdu_data_sz, 3458 BEISCSI_DEFQ_DATA, ulp_num); 3459 if (ret) { 3460 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3461 "BM_%d be_cmd_create_default_pdu_queue" 3462 " Failed for DEF PDU DATA on ULP : %d\n", 3463 ulp_num); 3464 return ret; 3465 } 3466 3467 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3468 "BM_%d : iscsi def data id on ULP : %d is %d\n", 3469 ulp_num, 3470 phwi_context->be_def_dataq[ulp_num].id); 3471 3472 hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num); 3473 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3474 "BM_%d : DEFAULT PDU DATA RING CREATED" 3475 "on ULP : %d\n", ulp_num); 3476 3477 return 0; 3478 } 3479 3480 3481 static int 3482 beiscsi_post_template_hdr(struct beiscsi_hba *phba) 3483 { 3484 struct be_mem_descriptor *mem_descr; 3485 struct mem_array *pm_arr; 3486 struct be_dma_mem sgl; 3487 int status, ulp_num; 3488 3489 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3490 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3491 mem_descr = (struct be_mem_descriptor *)phba->init_mem; 3492 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 + 3493 (ulp_num * MEM_DESCR_OFFSET); 3494 pm_arr = mem_descr->mem_array; 3495 3496 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3497 status = be_cmd_iscsi_post_template_hdr( 3498 &phba->ctrl, &sgl); 3499 3500 if (status != 0) { 3501 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3502 "BM_%d : Post Template HDR Failed for" 3503 "ULP_%d\n", ulp_num); 3504 return status; 3505 } 3506 3507 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3508 "BM_%d : Template HDR Pages Posted for" 3509 "ULP_%d\n", ulp_num); 3510 } 3511 } 3512 return 0; 3513 } 3514 3515 static int 3516 beiscsi_post_pages(struct beiscsi_hba *phba) 3517 { 3518 struct be_mem_descriptor *mem_descr; 3519 struct mem_array *pm_arr; 3520 unsigned int page_offset, i; 3521 struct be_dma_mem sgl; 3522 int status, ulp_num = 0; 3523 3524 mem_descr = phba->init_mem; 3525 mem_descr += HWI_MEM_SGE; 3526 pm_arr = mem_descr->mem_array; 3527 3528 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3529 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 3530 break; 3531 3532 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io * 3533 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE; 3534 for (i = 0; i < mem_descr->num_elements; i++) { 3535 hwi_build_be_sgl_arr(phba, pm_arr, &sgl); 3536 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl, 3537 page_offset, 3538 (pm_arr->size / PAGE_SIZE)); 3539 page_offset += pm_arr->size / PAGE_SIZE; 3540 if (status != 0) { 3541 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3542 "BM_%d : post sgl failed.\n"); 3543 return status; 3544 } 3545 pm_arr++; 3546 } 3547 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3548 "BM_%d : POSTED PAGES\n"); 3549 return 0; 3550 } 3551 3552 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) 3553 { 3554 struct be_dma_mem *mem = &q->dma_mem; 3555 if (mem->va) { 3556 pci_free_consistent(phba->pcidev, mem->size, 3557 mem->va, mem->dma); 3558 mem->va = NULL; 3559 } 3560 } 3561 3562 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, 3563 u16 len, u16 entry_size) 3564 { 3565 struct be_dma_mem *mem = &q->dma_mem; 3566 3567 memset(q, 0, sizeof(*q)); 3568 q->len = len; 3569 q->entry_size = entry_size; 3570 mem->size = len * entry_size; 3571 mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma); 3572 if (!mem->va) 3573 return -ENOMEM; 3574 return 0; 3575 } 3576 3577 static int 3578 beiscsi_create_wrb_rings(struct beiscsi_hba *phba, 3579 struct hwi_context_memory *phwi_context, 3580 struct hwi_controller *phwi_ctrlr) 3581 { 3582 unsigned int wrb_mem_index, offset, size, num_wrb_rings; 3583 u64 pa_addr_lo; 3584 unsigned int idx, num, i, ulp_num; 3585 struct mem_array *pwrb_arr; 3586 void *wrb_vaddr; 3587 struct be_dma_mem sgl; 3588 struct be_mem_descriptor *mem_descr; 3589 struct hwi_wrb_context *pwrb_context; 3590 int status; 3591 uint8_t ulp_count = 0, ulp_base_num = 0; 3592 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 }; 3593 3594 idx = 0; 3595 mem_descr = phba->init_mem; 3596 mem_descr += HWI_MEM_WRB; 3597 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl, 3598 GFP_KERNEL); 3599 if (!pwrb_arr) { 3600 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3601 "BM_%d : Memory alloc failed in create wrb ring.\n"); 3602 return -ENOMEM; 3603 } 3604 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3605 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address; 3606 num_wrb_rings = mem_descr->mem_array[idx].size / 3607 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb)); 3608 3609 for (num = 0; num < phba->params.cxns_per_ctrl; num++) { 3610 if (num_wrb_rings) { 3611 pwrb_arr[num].virtual_address = wrb_vaddr; 3612 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo; 3613 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3614 sizeof(struct iscsi_wrb); 3615 wrb_vaddr += pwrb_arr[num].size; 3616 pa_addr_lo += pwrb_arr[num].size; 3617 num_wrb_rings--; 3618 } else { 3619 idx++; 3620 wrb_vaddr = mem_descr->mem_array[idx].virtual_address; 3621 pa_addr_lo = mem_descr->mem_array[idx].\ 3622 bus_address.u.a64.address; 3623 num_wrb_rings = mem_descr->mem_array[idx].size / 3624 (phba->params.wrbs_per_cxn * 3625 sizeof(struct iscsi_wrb)); 3626 pwrb_arr[num].virtual_address = wrb_vaddr; 3627 pwrb_arr[num].bus_address.u.a64.address\ 3628 = pa_addr_lo; 3629 pwrb_arr[num].size = phba->params.wrbs_per_cxn * 3630 sizeof(struct iscsi_wrb); 3631 wrb_vaddr += pwrb_arr[num].size; 3632 pa_addr_lo += pwrb_arr[num].size; 3633 num_wrb_rings--; 3634 } 3635 } 3636 3637 /* Get the ULP Count */ 3638 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 3639 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3640 ulp_count++; 3641 ulp_base_num = ulp_num; 3642 cid_count_ulp[ulp_num] = 3643 BEISCSI_GET_CID_COUNT(phba, ulp_num); 3644 } 3645 3646 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3647 wrb_mem_index = 0; 3648 offset = 0; 3649 size = 0; 3650 3651 if (ulp_count > 1) { 3652 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT; 3653 3654 if (!cid_count_ulp[ulp_base_num]) 3655 ulp_base_num = (ulp_base_num + 1) % 3656 BEISCSI_ULP_COUNT; 3657 3658 cid_count_ulp[ulp_base_num]--; 3659 } 3660 3661 3662 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl); 3663 status = be_cmd_wrbq_create(&phba->ctrl, &sgl, 3664 &phwi_context->be_wrbq[i], 3665 &phwi_ctrlr->wrb_context[i], 3666 ulp_base_num); 3667 if (status != 0) { 3668 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3669 "BM_%d : wrbq create failed."); 3670 kfree(pwrb_arr); 3671 return status; 3672 } 3673 pwrb_context = &phwi_ctrlr->wrb_context[i]; 3674 BE_SET_CID_TO_CRI(i, pwrb_context->cid); 3675 } 3676 kfree(pwrb_arr); 3677 return 0; 3678 } 3679 3680 static void free_wrb_handles(struct beiscsi_hba *phba) 3681 { 3682 unsigned int index; 3683 struct hwi_controller *phwi_ctrlr; 3684 struct hwi_wrb_context *pwrb_context; 3685 3686 phwi_ctrlr = phba->phwi_ctrlr; 3687 for (index = 0; index < phba->params.cxns_per_ctrl; index++) { 3688 pwrb_context = &phwi_ctrlr->wrb_context[index]; 3689 kfree(pwrb_context->pwrb_handle_base); 3690 kfree(pwrb_context->pwrb_handle_basestd); 3691 } 3692 } 3693 3694 static void be_mcc_queues_destroy(struct beiscsi_hba *phba) 3695 { 3696 struct be_queue_info *q; 3697 struct be_ctrl_info *ctrl = &phba->ctrl; 3698 3699 q = &phba->ctrl.mcc_obj.q; 3700 if (q->created) { 3701 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); 3702 be_queue_free(phba, q); 3703 } 3704 3705 q = &phba->ctrl.mcc_obj.cq; 3706 if (q->created) { 3707 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3708 be_queue_free(phba, q); 3709 } 3710 } 3711 3712 static void hwi_cleanup(struct beiscsi_hba *phba) 3713 { 3714 struct be_queue_info *q; 3715 struct be_ctrl_info *ctrl = &phba->ctrl; 3716 struct hwi_controller *phwi_ctrlr; 3717 struct hwi_context_memory *phwi_context; 3718 struct hwi_async_pdu_context *pasync_ctx; 3719 int i, eq_for_mcc, ulp_num; 3720 3721 phwi_ctrlr = phba->phwi_ctrlr; 3722 phwi_context = phwi_ctrlr->phwi_ctxt; 3723 3724 be_cmd_iscsi_remove_template_hdr(ctrl); 3725 3726 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 3727 q = &phwi_context->be_wrbq[i]; 3728 if (q->created) 3729 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); 3730 } 3731 kfree(phwi_context->be_wrbq); 3732 free_wrb_handles(phba); 3733 3734 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3735 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3736 3737 q = &phwi_context->be_def_hdrq[ulp_num]; 3738 if (q->created) 3739 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3740 3741 q = &phwi_context->be_def_dataq[ulp_num]; 3742 if (q->created) 3743 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); 3744 3745 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num]; 3746 } 3747 } 3748 3749 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); 3750 3751 for (i = 0; i < (phba->num_cpus); i++) { 3752 q = &phwi_context->be_cq[i]; 3753 if (q->created) { 3754 be_queue_free(phba, q); 3755 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); 3756 } 3757 } 3758 3759 be_mcc_queues_destroy(phba); 3760 if (phba->msix_enabled) 3761 eq_for_mcc = 1; 3762 else 3763 eq_for_mcc = 0; 3764 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) { 3765 q = &phwi_context->be_eq[i].q; 3766 if (q->created) { 3767 be_queue_free(phba, q); 3768 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); 3769 } 3770 } 3771 be_cmd_fw_uninit(ctrl); 3772 } 3773 3774 static int be_mcc_queues_create(struct beiscsi_hba *phba, 3775 struct hwi_context_memory *phwi_context) 3776 { 3777 struct be_queue_info *q, *cq; 3778 struct be_ctrl_info *ctrl = &phba->ctrl; 3779 3780 /* Alloc MCC compl queue */ 3781 cq = &phba->ctrl.mcc_obj.cq; 3782 if (be_queue_alloc(phba, cq, MCC_CQ_LEN, 3783 sizeof(struct be_mcc_compl))) 3784 goto err; 3785 /* Ask BE to create MCC compl queue; */ 3786 if (phba->msix_enabled) { 3787 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq 3788 [phba->num_cpus].q, false, true, 0)) 3789 goto mcc_cq_free; 3790 } else { 3791 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, 3792 false, true, 0)) 3793 goto mcc_cq_free; 3794 } 3795 3796 /* Alloc MCC queue */ 3797 q = &phba->ctrl.mcc_obj.q; 3798 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) 3799 goto mcc_cq_destroy; 3800 3801 /* Ask BE to create MCC queue */ 3802 if (beiscsi_cmd_mccq_create(phba, q, cq)) 3803 goto mcc_q_free; 3804 3805 return 0; 3806 3807 mcc_q_free: 3808 be_queue_free(phba, q); 3809 mcc_cq_destroy: 3810 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ); 3811 mcc_cq_free: 3812 be_queue_free(phba, cq); 3813 err: 3814 return -ENOMEM; 3815 } 3816 3817 /** 3818 * find_num_cpus()- Get the CPU online count 3819 * @phba: ptr to priv structure 3820 * 3821 * CPU count is used for creating EQ. 3822 **/ 3823 static void find_num_cpus(struct beiscsi_hba *phba) 3824 { 3825 int num_cpus = 0; 3826 3827 num_cpus = num_online_cpus(); 3828 3829 switch (phba->generation) { 3830 case BE_GEN2: 3831 case BE_GEN3: 3832 phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ? 3833 BEISCSI_MAX_NUM_CPUS : num_cpus; 3834 break; 3835 case BE_GEN4: 3836 /* 3837 * If eqid_count == 1 fall back to 3838 * INTX mechanism 3839 **/ 3840 if (phba->fw_config.eqid_count == 1) { 3841 enable_msix = 0; 3842 phba->num_cpus = 1; 3843 return; 3844 } 3845 3846 phba->num_cpus = 3847 (num_cpus > (phba->fw_config.eqid_count - 1)) ? 3848 (phba->fw_config.eqid_count - 1) : num_cpus; 3849 break; 3850 default: 3851 phba->num_cpus = 1; 3852 } 3853 } 3854 3855 static int hwi_init_port(struct beiscsi_hba *phba) 3856 { 3857 struct hwi_controller *phwi_ctrlr; 3858 struct hwi_context_memory *phwi_context; 3859 unsigned int def_pdu_ring_sz; 3860 struct be_ctrl_info *ctrl = &phba->ctrl; 3861 int status, ulp_num; 3862 3863 phwi_ctrlr = phba->phwi_ctrlr; 3864 phwi_context = phwi_ctrlr->phwi_ctxt; 3865 phwi_context->max_eqd = 128; 3866 phwi_context->min_eqd = 0; 3867 phwi_context->cur_eqd = 0; 3868 be_cmd_fw_initialize(&phba->ctrl); 3869 3870 status = beiscsi_create_eqs(phba, phwi_context); 3871 if (status != 0) { 3872 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3873 "BM_%d : EQ not created\n"); 3874 goto error; 3875 } 3876 3877 status = be_mcc_queues_create(phba, phwi_context); 3878 if (status != 0) 3879 goto error; 3880 3881 status = mgmt_check_supported_fw(ctrl, phba); 3882 if (status != 0) { 3883 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3884 "BM_%d : Unsupported fw version\n"); 3885 goto error; 3886 } 3887 3888 status = beiscsi_create_cqs(phba, phwi_context); 3889 if (status != 0) { 3890 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3891 "BM_%d : CQ not created\n"); 3892 goto error; 3893 } 3894 3895 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3896 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3897 3898 def_pdu_ring_sz = 3899 BEISCSI_GET_CID_COUNT(phba, ulp_num) * 3900 sizeof(struct phys_addr); 3901 3902 status = beiscsi_create_def_hdr(phba, phwi_context, 3903 phwi_ctrlr, 3904 def_pdu_ring_sz, 3905 ulp_num); 3906 if (status != 0) { 3907 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3908 "BM_%d : Default Header not created for ULP : %d\n", 3909 ulp_num); 3910 goto error; 3911 } 3912 3913 status = beiscsi_create_def_data(phba, phwi_context, 3914 phwi_ctrlr, 3915 def_pdu_ring_sz, 3916 ulp_num); 3917 if (status != 0) { 3918 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3919 "BM_%d : Default Data not created for ULP : %d\n", 3920 ulp_num); 3921 goto error; 3922 } 3923 } 3924 } 3925 3926 status = beiscsi_post_pages(phba); 3927 if (status != 0) { 3928 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3929 "BM_%d : Post SGL Pages Failed\n"); 3930 goto error; 3931 } 3932 3933 status = beiscsi_post_template_hdr(phba); 3934 if (status != 0) { 3935 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3936 "BM_%d : Template HDR Posting for CXN Failed\n"); 3937 } 3938 3939 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr); 3940 if (status != 0) { 3941 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3942 "BM_%d : WRB Rings not created\n"); 3943 goto error; 3944 } 3945 3946 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 3947 uint16_t async_arr_idx = 0; 3948 3949 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) { 3950 uint16_t cri = 0; 3951 struct hwi_async_pdu_context *pasync_ctx; 3952 3953 pasync_ctx = HWI_GET_ASYNC_PDU_CTX( 3954 phwi_ctrlr, ulp_num); 3955 for (cri = 0; cri < 3956 phba->params.cxns_per_ctrl; cri++) { 3957 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI 3958 (phwi_ctrlr, cri)) 3959 pasync_ctx->cid_to_async_cri_map[ 3960 phwi_ctrlr->wrb_context[cri].cid] = 3961 async_arr_idx++; 3962 } 3963 } 3964 } 3965 3966 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3967 "BM_%d : hwi_init_port success\n"); 3968 return 0; 3969 3970 error: 3971 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3972 "BM_%d : hwi_init_port failed"); 3973 hwi_cleanup(phba); 3974 return status; 3975 } 3976 3977 static int hwi_init_controller(struct beiscsi_hba *phba) 3978 { 3979 struct hwi_controller *phwi_ctrlr; 3980 3981 phwi_ctrlr = phba->phwi_ctrlr; 3982 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) { 3983 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba-> 3984 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address; 3985 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 3986 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n", 3987 phwi_ctrlr->phwi_ctxt); 3988 } else { 3989 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 3990 "BM_%d : HWI_MEM_ADDN_CONTEXT is more " 3991 "than one element.Failing to load\n"); 3992 return -ENOMEM; 3993 } 3994 3995 iscsi_init_global_templates(phba); 3996 if (beiscsi_init_wrb_handle(phba)) 3997 return -ENOMEM; 3998 3999 if (hwi_init_async_pdu_ctx(phba)) { 4000 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4001 "BM_%d : hwi_init_async_pdu_ctx failed\n"); 4002 return -ENOMEM; 4003 } 4004 4005 if (hwi_init_port(phba) != 0) { 4006 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4007 "BM_%d : hwi_init_controller failed\n"); 4008 4009 return -ENOMEM; 4010 } 4011 return 0; 4012 } 4013 4014 static void beiscsi_free_mem(struct beiscsi_hba *phba) 4015 { 4016 struct be_mem_descriptor *mem_descr; 4017 int i, j; 4018 4019 mem_descr = phba->init_mem; 4020 i = 0; 4021 j = 0; 4022 for (i = 0; i < SE_MEM_MAX; i++) { 4023 for (j = mem_descr->num_elements; j > 0; j--) { 4024 pci_free_consistent(phba->pcidev, 4025 mem_descr->mem_array[j - 1].size, 4026 mem_descr->mem_array[j - 1].virtual_address, 4027 (unsigned long)mem_descr->mem_array[j - 1]. 4028 bus_address.u.a64.address); 4029 } 4030 4031 kfree(mem_descr->mem_array); 4032 mem_descr++; 4033 } 4034 kfree(phba->init_mem); 4035 kfree(phba->phwi_ctrlr->wrb_context); 4036 kfree(phba->phwi_ctrlr); 4037 } 4038 4039 static int beiscsi_init_controller(struct beiscsi_hba *phba) 4040 { 4041 int ret = -ENOMEM; 4042 4043 ret = beiscsi_get_memory(phba); 4044 if (ret < 0) { 4045 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4046 "BM_%d : beiscsi_dev_probe -" 4047 "Failed in beiscsi_alloc_memory\n"); 4048 return ret; 4049 } 4050 4051 ret = hwi_init_controller(phba); 4052 if (ret) 4053 goto free_init; 4054 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4055 "BM_%d : Return success from beiscsi_init_controller"); 4056 4057 return 0; 4058 4059 free_init: 4060 beiscsi_free_mem(phba); 4061 return ret; 4062 } 4063 4064 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) 4065 { 4066 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg; 4067 struct sgl_handle *psgl_handle; 4068 struct iscsi_sge *pfrag; 4069 unsigned int arr_index, i, idx; 4070 unsigned int ulp_icd_start, ulp_num = 0; 4071 4072 phba->io_sgl_hndl_avbl = 0; 4073 phba->eh_sgl_hndl_avbl = 0; 4074 4075 mem_descr_sglh = phba->init_mem; 4076 mem_descr_sglh += HWI_MEM_SGLH; 4077 if (1 == mem_descr_sglh->num_elements) { 4078 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 4079 phba->params.ios_per_ctrl, 4080 GFP_KERNEL); 4081 if (!phba->io_sgl_hndl_base) { 4082 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4083 "BM_%d : Mem Alloc Failed. Failing to load\n"); 4084 return -ENOMEM; 4085 } 4086 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) * 4087 (phba->params.icds_per_ctrl - 4088 phba->params.ios_per_ctrl), 4089 GFP_KERNEL); 4090 if (!phba->eh_sgl_hndl_base) { 4091 kfree(phba->io_sgl_hndl_base); 4092 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4093 "BM_%d : Mem Alloc Failed. Failing to load\n"); 4094 return -ENOMEM; 4095 } 4096 } else { 4097 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4098 "BM_%d : HWI_MEM_SGLH is more than one element." 4099 "Failing to load\n"); 4100 return -ENOMEM; 4101 } 4102 4103 arr_index = 0; 4104 idx = 0; 4105 while (idx < mem_descr_sglh->num_elements) { 4106 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address; 4107 4108 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size / 4109 sizeof(struct sgl_handle)); i++) { 4110 if (arr_index < phba->params.ios_per_ctrl) { 4111 phba->io_sgl_hndl_base[arr_index] = psgl_handle; 4112 phba->io_sgl_hndl_avbl++; 4113 arr_index++; 4114 } else { 4115 phba->eh_sgl_hndl_base[arr_index - 4116 phba->params.ios_per_ctrl] = 4117 psgl_handle; 4118 arr_index++; 4119 phba->eh_sgl_hndl_avbl++; 4120 } 4121 psgl_handle++; 4122 } 4123 idx++; 4124 } 4125 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4126 "BM_%d : phba->io_sgl_hndl_avbl=%d" 4127 "phba->eh_sgl_hndl_avbl=%d\n", 4128 phba->io_sgl_hndl_avbl, 4129 phba->eh_sgl_hndl_avbl); 4130 4131 mem_descr_sg = phba->init_mem; 4132 mem_descr_sg += HWI_MEM_SGE; 4133 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4134 "\n BM_%d : mem_descr_sg->num_elements=%d\n", 4135 mem_descr_sg->num_elements); 4136 4137 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) 4138 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) 4139 break; 4140 4141 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num]; 4142 4143 arr_index = 0; 4144 idx = 0; 4145 while (idx < mem_descr_sg->num_elements) { 4146 pfrag = mem_descr_sg->mem_array[idx].virtual_address; 4147 4148 for (i = 0; 4149 i < (mem_descr_sg->mem_array[idx].size) / 4150 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io); 4151 i++) { 4152 if (arr_index < phba->params.ios_per_ctrl) 4153 psgl_handle = phba->io_sgl_hndl_base[arr_index]; 4154 else 4155 psgl_handle = phba->eh_sgl_hndl_base[arr_index - 4156 phba->params.ios_per_ctrl]; 4157 psgl_handle->pfrag = pfrag; 4158 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0); 4159 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0); 4160 pfrag += phba->params.num_sge_per_io; 4161 psgl_handle->sgl_index = ulp_icd_start + arr_index++; 4162 } 4163 idx++; 4164 } 4165 phba->io_sgl_free_index = 0; 4166 phba->io_sgl_alloc_index = 0; 4167 phba->eh_sgl_free_index = 0; 4168 phba->eh_sgl_alloc_index = 0; 4169 return 0; 4170 } 4171 4172 static int hba_setup_cid_tbls(struct beiscsi_hba *phba) 4173 { 4174 int ret; 4175 uint16_t i, ulp_num; 4176 struct ulp_cid_info *ptr_cid_info = NULL; 4177 4178 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4179 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4180 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info), 4181 GFP_KERNEL); 4182 4183 if (!ptr_cid_info) { 4184 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4185 "BM_%d : Failed to allocate memory" 4186 "for ULP_CID_INFO for ULP : %d\n", 4187 ulp_num); 4188 ret = -ENOMEM; 4189 goto free_memory; 4190 4191 } 4192 4193 /* Allocate memory for CID array */ 4194 ptr_cid_info->cid_array = kzalloc(sizeof(void *) * 4195 BEISCSI_GET_CID_COUNT(phba, 4196 ulp_num), GFP_KERNEL); 4197 if (!ptr_cid_info->cid_array) { 4198 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4199 "BM_%d : Failed to allocate memory" 4200 "for CID_ARRAY for ULP : %d\n", 4201 ulp_num); 4202 kfree(ptr_cid_info); 4203 ptr_cid_info = NULL; 4204 ret = -ENOMEM; 4205 4206 goto free_memory; 4207 } 4208 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT( 4209 phba, ulp_num); 4210 4211 /* Save the cid_info_array ptr */ 4212 phba->cid_array_info[ulp_num] = ptr_cid_info; 4213 } 4214 } 4215 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * 4216 phba->params.cxns_per_ctrl, GFP_KERNEL); 4217 if (!phba->ep_array) { 4218 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4219 "BM_%d : Failed to allocate memory in " 4220 "hba_setup_cid_tbls\n"); 4221 ret = -ENOMEM; 4222 4223 goto free_memory; 4224 } 4225 4226 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * 4227 phba->params.cxns_per_ctrl, GFP_KERNEL); 4228 if (!phba->conn_table) { 4229 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4230 "BM_%d : Failed to allocate memory in" 4231 "hba_setup_cid_tbls\n"); 4232 4233 kfree(phba->ep_array); 4234 phba->ep_array = NULL; 4235 ret = -ENOMEM; 4236 4237 goto free_memory; 4238 } 4239 4240 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4241 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num; 4242 4243 ptr_cid_info = phba->cid_array_info[ulp_num]; 4244 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] = 4245 phba->phwi_ctrlr->wrb_context[i].cid; 4246 4247 } 4248 4249 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4250 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4251 ptr_cid_info = phba->cid_array_info[ulp_num]; 4252 4253 ptr_cid_info->cid_alloc = 0; 4254 ptr_cid_info->cid_free = 0; 4255 } 4256 } 4257 return 0; 4258 4259 free_memory: 4260 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4261 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4262 ptr_cid_info = phba->cid_array_info[ulp_num]; 4263 4264 if (ptr_cid_info) { 4265 kfree(ptr_cid_info->cid_array); 4266 kfree(ptr_cid_info); 4267 phba->cid_array_info[ulp_num] = NULL; 4268 } 4269 } 4270 } 4271 4272 return ret; 4273 } 4274 4275 static void hwi_enable_intr(struct beiscsi_hba *phba) 4276 { 4277 struct be_ctrl_info *ctrl = &phba->ctrl; 4278 struct hwi_controller *phwi_ctrlr; 4279 struct hwi_context_memory *phwi_context; 4280 struct be_queue_info *eq; 4281 u8 __iomem *addr; 4282 u32 reg, i; 4283 u32 enabled; 4284 4285 phwi_ctrlr = phba->phwi_ctrlr; 4286 phwi_context = phwi_ctrlr->phwi_ctxt; 4287 4288 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg + 4289 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET); 4290 reg = ioread32(addr); 4291 4292 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4293 if (!enabled) { 4294 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4295 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4296 "BM_%d : reg =x%08x addr=%p\n", reg, addr); 4297 iowrite32(reg, addr); 4298 } 4299 4300 if (!phba->msix_enabled) { 4301 eq = &phwi_context->be_eq[0].q; 4302 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4303 "BM_%d : eq->id=%d\n", eq->id); 4304 4305 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4306 } else { 4307 for (i = 0; i <= phba->num_cpus; i++) { 4308 eq = &phwi_context->be_eq[i].q; 4309 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 4310 "BM_%d : eq->id=%d\n", eq->id); 4311 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1); 4312 } 4313 } 4314 } 4315 4316 static void hwi_disable_intr(struct beiscsi_hba *phba) 4317 { 4318 struct be_ctrl_info *ctrl = &phba->ctrl; 4319 4320 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET; 4321 u32 reg = ioread32(addr); 4322 4323 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4324 if (enabled) { 4325 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 4326 iowrite32(reg, addr); 4327 } else 4328 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 4329 "BM_%d : In hwi_disable_intr, Already Disabled\n"); 4330 } 4331 4332 /** 4333 * beiscsi_get_boot_info()- Get the boot session info 4334 * @phba: The device priv structure instance 4335 * 4336 * Get the boot target info and store in driver priv structure 4337 * 4338 * return values 4339 * Success: 0 4340 * Failure: Non-Zero Value 4341 **/ 4342 static int beiscsi_get_boot_info(struct beiscsi_hba *phba) 4343 { 4344 struct be_cmd_get_session_resp *session_resp; 4345 struct be_dma_mem nonemb_cmd; 4346 unsigned int tag; 4347 unsigned int s_handle; 4348 int ret = -ENOMEM; 4349 4350 /* Get the session handle of the boot target */ 4351 ret = be_mgmt_get_boot_shandle(phba, &s_handle); 4352 if (ret) { 4353 beiscsi_log(phba, KERN_ERR, 4354 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4355 "BM_%d : No boot session\n"); 4356 4357 if (ret == -ENXIO) 4358 phba->get_boot = 0; 4359 4360 4361 return ret; 4362 } 4363 phba->get_boot = 0; 4364 nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev, 4365 sizeof(*session_resp), 4366 &nonemb_cmd.dma); 4367 if (nonemb_cmd.va == NULL) { 4368 beiscsi_log(phba, KERN_ERR, 4369 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4370 "BM_%d : Failed to allocate memory for" 4371 "beiscsi_get_session_info\n"); 4372 4373 return -ENOMEM; 4374 } 4375 4376 tag = mgmt_get_session_info(phba, s_handle, 4377 &nonemb_cmd); 4378 if (!tag) { 4379 beiscsi_log(phba, KERN_ERR, 4380 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4381 "BM_%d : beiscsi_get_session_info" 4382 " Failed\n"); 4383 4384 goto boot_freemem; 4385 } 4386 4387 ret = beiscsi_mccq_compl(phba, tag, NULL, &nonemb_cmd); 4388 if (ret) { 4389 beiscsi_log(phba, KERN_ERR, 4390 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG, 4391 "BM_%d : beiscsi_get_session_info Failed"); 4392 4393 if (ret != -EBUSY) 4394 goto boot_freemem; 4395 else 4396 return ret; 4397 } 4398 4399 session_resp = nonemb_cmd.va ; 4400 4401 memcpy(&phba->boot_sess, &session_resp->session_info, 4402 sizeof(struct mgmt_session_info)); 4403 4404 beiscsi_logout_fw_sess(phba, 4405 phba->boot_sess.session_handle); 4406 ret = 0; 4407 4408 boot_freemem: 4409 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 4410 nonemb_cmd.va, nonemb_cmd.dma); 4411 return ret; 4412 } 4413 4414 static void beiscsi_boot_release(void *data) 4415 { 4416 struct beiscsi_hba *phba = data; 4417 4418 scsi_host_put(phba->shost); 4419 } 4420 4421 static int beiscsi_setup_boot_info(struct beiscsi_hba *phba) 4422 { 4423 struct iscsi_boot_kobj *boot_kobj; 4424 4425 /* it has been created previously */ 4426 if (phba->boot_kset) 4427 return 0; 4428 4429 /* get boot info using mgmt cmd */ 4430 if (beiscsi_get_boot_info(phba)) 4431 /* Try to see if we can carry on without this */ 4432 return 0; 4433 4434 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no); 4435 if (!phba->boot_kset) 4436 return -ENOMEM; 4437 4438 /* get a ref because the show function will ref the phba */ 4439 if (!scsi_host_get(phba->shost)) 4440 goto free_kset; 4441 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba, 4442 beiscsi_show_boot_tgt_info, 4443 beiscsi_tgt_get_attr_visibility, 4444 beiscsi_boot_release); 4445 if (!boot_kobj) 4446 goto put_shost; 4447 4448 if (!scsi_host_get(phba->shost)) 4449 goto free_kset; 4450 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba, 4451 beiscsi_show_boot_ini_info, 4452 beiscsi_ini_get_attr_visibility, 4453 beiscsi_boot_release); 4454 if (!boot_kobj) 4455 goto put_shost; 4456 4457 if (!scsi_host_get(phba->shost)) 4458 goto free_kset; 4459 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba, 4460 beiscsi_show_boot_eth_info, 4461 beiscsi_eth_get_attr_visibility, 4462 beiscsi_boot_release); 4463 if (!boot_kobj) 4464 goto put_shost; 4465 return 0; 4466 4467 put_shost: 4468 scsi_host_put(phba->shost); 4469 free_kset: 4470 iscsi_boot_destroy_kset(phba->boot_kset); 4471 return -ENOMEM; 4472 } 4473 4474 static int beiscsi_init_port(struct beiscsi_hba *phba) 4475 { 4476 int ret; 4477 4478 ret = beiscsi_init_controller(phba); 4479 if (ret < 0) { 4480 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4481 "BM_%d : beiscsi_dev_probe - Failed in" 4482 "beiscsi_init_controller\n"); 4483 return ret; 4484 } 4485 ret = beiscsi_init_sgl_handle(phba); 4486 if (ret < 0) { 4487 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4488 "BM_%d : beiscsi_dev_probe - Failed in" 4489 "beiscsi_init_sgl_handle\n"); 4490 goto do_cleanup_ctrlr; 4491 } 4492 4493 if (hba_setup_cid_tbls(phba)) { 4494 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 4495 "BM_%d : Failed in hba_setup_cid_tbls\n"); 4496 kfree(phba->io_sgl_hndl_base); 4497 kfree(phba->eh_sgl_hndl_base); 4498 goto do_cleanup_ctrlr; 4499 } 4500 4501 return ret; 4502 4503 do_cleanup_ctrlr: 4504 hwi_cleanup(phba); 4505 return ret; 4506 } 4507 4508 static void hwi_purge_eq(struct beiscsi_hba *phba) 4509 { 4510 struct hwi_controller *phwi_ctrlr; 4511 struct hwi_context_memory *phwi_context; 4512 struct be_queue_info *eq; 4513 struct be_eq_entry *eqe = NULL; 4514 int i, eq_msix; 4515 unsigned int num_processed; 4516 4517 phwi_ctrlr = phba->phwi_ctrlr; 4518 phwi_context = phwi_ctrlr->phwi_ctxt; 4519 if (phba->msix_enabled) 4520 eq_msix = 1; 4521 else 4522 eq_msix = 0; 4523 4524 for (i = 0; i < (phba->num_cpus + eq_msix); i++) { 4525 eq = &phwi_context->be_eq[i].q; 4526 eqe = queue_tail_node(eq); 4527 num_processed = 0; 4528 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] 4529 & EQE_VALID_MASK) { 4530 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); 4531 queue_tail_inc(eq); 4532 eqe = queue_tail_node(eq); 4533 num_processed++; 4534 } 4535 4536 if (num_processed) 4537 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1); 4538 } 4539 } 4540 4541 static void beiscsi_clean_port(struct beiscsi_hba *phba) 4542 { 4543 int mgmt_status, ulp_num; 4544 struct ulp_cid_info *ptr_cid_info = NULL; 4545 4546 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4547 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4548 mgmt_status = mgmt_epfw_cleanup(phba, ulp_num); 4549 if (mgmt_status) 4550 beiscsi_log(phba, KERN_WARNING, 4551 BEISCSI_LOG_INIT, 4552 "BM_%d : mgmt_epfw_cleanup FAILED" 4553 " for ULP_%d\n", ulp_num); 4554 } 4555 } 4556 4557 hwi_purge_eq(phba); 4558 hwi_cleanup(phba); 4559 kfree(phba->io_sgl_hndl_base); 4560 kfree(phba->eh_sgl_hndl_base); 4561 kfree(phba->ep_array); 4562 kfree(phba->conn_table); 4563 4564 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) { 4565 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) { 4566 ptr_cid_info = phba->cid_array_info[ulp_num]; 4567 4568 if (ptr_cid_info) { 4569 kfree(ptr_cid_info->cid_array); 4570 kfree(ptr_cid_info); 4571 phba->cid_array_info[ulp_num] = NULL; 4572 } 4573 } 4574 } 4575 4576 } 4577 4578 /** 4579 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources 4580 * @beiscsi_conn: ptr to the conn to be cleaned up 4581 * @task: ptr to iscsi_task resource to be freed. 4582 * 4583 * Free driver mgmt resources binded to CXN. 4584 **/ 4585 void 4586 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, 4587 struct iscsi_task *task) 4588 { 4589 struct beiscsi_io_task *io_task; 4590 struct beiscsi_hba *phba = beiscsi_conn->phba; 4591 struct hwi_wrb_context *pwrb_context; 4592 struct hwi_controller *phwi_ctrlr; 4593 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4594 beiscsi_conn->beiscsi_conn_cid); 4595 4596 phwi_ctrlr = phba->phwi_ctrlr; 4597 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4598 4599 io_task = task->dd_data; 4600 4601 if (io_task->pwrb_handle) { 4602 memset(io_task->pwrb_handle->pwrb, 0, 4603 sizeof(struct iscsi_wrb)); 4604 free_wrb_handle(phba, pwrb_context, 4605 io_task->pwrb_handle); 4606 io_task->pwrb_handle = NULL; 4607 } 4608 4609 if (io_task->psgl_handle) { 4610 spin_lock_bh(&phba->mgmt_sgl_lock); 4611 free_mgmt_sgl_handle(phba, 4612 io_task->psgl_handle); 4613 io_task->psgl_handle = NULL; 4614 spin_unlock_bh(&phba->mgmt_sgl_lock); 4615 } 4616 4617 if (io_task->mtask_addr) { 4618 pci_unmap_single(phba->pcidev, 4619 io_task->mtask_addr, 4620 io_task->mtask_data_count, 4621 PCI_DMA_TODEVICE); 4622 io_task->mtask_addr = 0; 4623 } 4624 } 4625 4626 /** 4627 * beiscsi_cleanup_task()- Free driver resources of the task 4628 * @task: ptr to the iscsi task 4629 * 4630 **/ 4631 static void beiscsi_cleanup_task(struct iscsi_task *task) 4632 { 4633 struct beiscsi_io_task *io_task = task->dd_data; 4634 struct iscsi_conn *conn = task->conn; 4635 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4636 struct beiscsi_hba *phba = beiscsi_conn->phba; 4637 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4638 struct hwi_wrb_context *pwrb_context; 4639 struct hwi_controller *phwi_ctrlr; 4640 uint16_t cri_index = BE_GET_CRI_FROM_CID( 4641 beiscsi_conn->beiscsi_conn_cid); 4642 4643 phwi_ctrlr = phba->phwi_ctrlr; 4644 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4645 4646 if (io_task->cmd_bhs) { 4647 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4648 io_task->bhs_pa.u.a64.address); 4649 io_task->cmd_bhs = NULL; 4650 } 4651 4652 if (task->sc) { 4653 if (io_task->pwrb_handle) { 4654 free_wrb_handle(phba, pwrb_context, 4655 io_task->pwrb_handle); 4656 io_task->pwrb_handle = NULL; 4657 } 4658 4659 if (io_task->psgl_handle) { 4660 spin_lock(&phba->io_sgl_lock); 4661 free_io_sgl_handle(phba, io_task->psgl_handle); 4662 spin_unlock(&phba->io_sgl_lock); 4663 io_task->psgl_handle = NULL; 4664 } 4665 4666 if (io_task->scsi_cmnd) { 4667 scsi_dma_unmap(io_task->scsi_cmnd); 4668 io_task->scsi_cmnd = NULL; 4669 } 4670 } else { 4671 if (!beiscsi_conn->login_in_progress) 4672 beiscsi_free_mgmt_task_handles(beiscsi_conn, task); 4673 } 4674 } 4675 4676 void 4677 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, 4678 struct beiscsi_offload_params *params) 4679 { 4680 struct wrb_handle *pwrb_handle; 4681 struct beiscsi_hba *phba = beiscsi_conn->phba; 4682 struct iscsi_task *task = beiscsi_conn->task; 4683 struct iscsi_session *session = task->conn->session; 4684 u32 doorbell = 0; 4685 4686 /* 4687 * We can always use 0 here because it is reserved by libiscsi for 4688 * login/startup related tasks. 4689 */ 4690 beiscsi_conn->login_in_progress = 0; 4691 spin_lock_bh(&session->back_lock); 4692 beiscsi_cleanup_task(task); 4693 spin_unlock_bh(&session->back_lock); 4694 4695 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid); 4696 4697 /* Check for the adapter family */ 4698 if (is_chip_be2_be3r(phba)) 4699 beiscsi_offload_cxn_v0(params, pwrb_handle, 4700 phba->init_mem); 4701 else 4702 beiscsi_offload_cxn_v2(params, pwrb_handle); 4703 4704 be_dws_le_to_cpu(pwrb_handle->pwrb, 4705 sizeof(struct iscsi_target_context_update_wrb)); 4706 4707 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4708 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) 4709 << DB_DEF_PDU_WRB_INDEX_SHIFT; 4710 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4711 iowrite32(doorbell, phba->db_va + 4712 beiscsi_conn->doorbell_offset); 4713 } 4714 4715 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt, 4716 int *index, int *age) 4717 { 4718 *index = (int)itt; 4719 if (age) 4720 *age = conn->session->age; 4721 } 4722 4723 /** 4724 * beiscsi_alloc_pdu - allocates pdu and related resources 4725 * @task: libiscsi task 4726 * @opcode: opcode of pdu for task 4727 * 4728 * This is called with the session lock held. It will allocate 4729 * the wrb and sgl if needed for the command. And it will prep 4730 * the pdu's itt. beiscsi_parse_pdu will later translate 4731 * the pdu itt to the libiscsi task itt. 4732 */ 4733 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) 4734 { 4735 struct beiscsi_io_task *io_task = task->dd_data; 4736 struct iscsi_conn *conn = task->conn; 4737 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4738 struct beiscsi_hba *phba = beiscsi_conn->phba; 4739 struct hwi_wrb_context *pwrb_context; 4740 struct hwi_controller *phwi_ctrlr; 4741 itt_t itt; 4742 uint16_t cri_index = 0; 4743 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; 4744 dma_addr_t paddr; 4745 4746 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool, 4747 GFP_ATOMIC, &paddr); 4748 if (!io_task->cmd_bhs) 4749 return -ENOMEM; 4750 io_task->bhs_pa.u.a64.address = paddr; 4751 io_task->libiscsi_itt = (itt_t)task->itt; 4752 io_task->conn = beiscsi_conn; 4753 4754 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr; 4755 task->hdr_max = sizeof(struct be_cmd_bhs); 4756 io_task->psgl_handle = NULL; 4757 io_task->pwrb_handle = NULL; 4758 4759 if (task->sc) { 4760 spin_lock(&phba->io_sgl_lock); 4761 io_task->psgl_handle = alloc_io_sgl_handle(phba); 4762 spin_unlock(&phba->io_sgl_lock); 4763 if (!io_task->psgl_handle) { 4764 beiscsi_log(phba, KERN_ERR, 4765 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4766 "BM_%d : Alloc of IO_SGL_ICD Failed" 4767 "for the CID : %d\n", 4768 beiscsi_conn->beiscsi_conn_cid); 4769 goto free_hndls; 4770 } 4771 io_task->pwrb_handle = alloc_wrb_handle(phba, 4772 beiscsi_conn->beiscsi_conn_cid); 4773 if (!io_task->pwrb_handle) { 4774 beiscsi_log(phba, KERN_ERR, 4775 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4776 "BM_%d : Alloc of WRB_HANDLE Failed" 4777 "for the CID : %d\n", 4778 beiscsi_conn->beiscsi_conn_cid); 4779 goto free_io_hndls; 4780 } 4781 } else { 4782 io_task->scsi_cmnd = NULL; 4783 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { 4784 beiscsi_conn->task = task; 4785 if (!beiscsi_conn->login_in_progress) { 4786 spin_lock(&phba->mgmt_sgl_lock); 4787 io_task->psgl_handle = (struct sgl_handle *) 4788 alloc_mgmt_sgl_handle(phba); 4789 spin_unlock(&phba->mgmt_sgl_lock); 4790 if (!io_task->psgl_handle) { 4791 beiscsi_log(phba, KERN_ERR, 4792 BEISCSI_LOG_IO | 4793 BEISCSI_LOG_CONFIG, 4794 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4795 "for the CID : %d\n", 4796 beiscsi_conn-> 4797 beiscsi_conn_cid); 4798 goto free_hndls; 4799 } 4800 4801 beiscsi_conn->login_in_progress = 1; 4802 beiscsi_conn->plogin_sgl_handle = 4803 io_task->psgl_handle; 4804 io_task->pwrb_handle = 4805 alloc_wrb_handle(phba, 4806 beiscsi_conn->beiscsi_conn_cid); 4807 if (!io_task->pwrb_handle) { 4808 beiscsi_log(phba, KERN_ERR, 4809 BEISCSI_LOG_IO | 4810 BEISCSI_LOG_CONFIG, 4811 "BM_%d : Alloc of WRB_HANDLE Failed" 4812 "for the CID : %d\n", 4813 beiscsi_conn-> 4814 beiscsi_conn_cid); 4815 goto free_mgmt_hndls; 4816 } 4817 beiscsi_conn->plogin_wrb_handle = 4818 io_task->pwrb_handle; 4819 4820 } else { 4821 io_task->psgl_handle = 4822 beiscsi_conn->plogin_sgl_handle; 4823 io_task->pwrb_handle = 4824 beiscsi_conn->plogin_wrb_handle; 4825 } 4826 } else { 4827 spin_lock(&phba->mgmt_sgl_lock); 4828 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); 4829 spin_unlock(&phba->mgmt_sgl_lock); 4830 if (!io_task->psgl_handle) { 4831 beiscsi_log(phba, KERN_ERR, 4832 BEISCSI_LOG_IO | 4833 BEISCSI_LOG_CONFIG, 4834 "BM_%d : Alloc of MGMT_SGL_ICD Failed" 4835 "for the CID : %d\n", 4836 beiscsi_conn-> 4837 beiscsi_conn_cid); 4838 goto free_hndls; 4839 } 4840 io_task->pwrb_handle = 4841 alloc_wrb_handle(phba, 4842 beiscsi_conn->beiscsi_conn_cid); 4843 if (!io_task->pwrb_handle) { 4844 beiscsi_log(phba, KERN_ERR, 4845 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, 4846 "BM_%d : Alloc of WRB_HANDLE Failed" 4847 "for the CID : %d\n", 4848 beiscsi_conn->beiscsi_conn_cid); 4849 goto free_mgmt_hndls; 4850 } 4851 4852 } 4853 } 4854 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle-> 4855 wrb_index << 16) | (unsigned int) 4856 (io_task->psgl_handle->sgl_index)); 4857 io_task->pwrb_handle->pio_handle = task; 4858 4859 io_task->cmd_bhs->iscsi_hdr.itt = itt; 4860 return 0; 4861 4862 free_io_hndls: 4863 spin_lock(&phba->io_sgl_lock); 4864 free_io_sgl_handle(phba, io_task->psgl_handle); 4865 spin_unlock(&phba->io_sgl_lock); 4866 goto free_hndls; 4867 free_mgmt_hndls: 4868 spin_lock(&phba->mgmt_sgl_lock); 4869 free_mgmt_sgl_handle(phba, io_task->psgl_handle); 4870 io_task->psgl_handle = NULL; 4871 spin_unlock(&phba->mgmt_sgl_lock); 4872 free_hndls: 4873 phwi_ctrlr = phba->phwi_ctrlr; 4874 cri_index = BE_GET_CRI_FROM_CID( 4875 beiscsi_conn->beiscsi_conn_cid); 4876 pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; 4877 if (io_task->pwrb_handle) 4878 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); 4879 io_task->pwrb_handle = NULL; 4880 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, 4881 io_task->bhs_pa.u.a64.address); 4882 io_task->cmd_bhs = NULL; 4883 return -ENOMEM; 4884 } 4885 int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, 4886 unsigned int num_sg, unsigned int xferlen, 4887 unsigned int writedir) 4888 { 4889 4890 struct beiscsi_io_task *io_task = task->dd_data; 4891 struct iscsi_conn *conn = task->conn; 4892 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4893 struct beiscsi_hba *phba = beiscsi_conn->phba; 4894 struct iscsi_wrb *pwrb = NULL; 4895 unsigned int doorbell = 0; 4896 4897 pwrb = io_task->pwrb_handle->pwrb; 4898 4899 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4900 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4901 4902 if (writedir) { 4903 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4904 INI_WR_CMD); 4905 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1); 4906 } else { 4907 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb, 4908 INI_RD_CMD); 4909 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0); 4910 } 4911 4912 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2, 4913 type, pwrb); 4914 4915 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb, 4916 cpu_to_be16(*(unsigned short *) 4917 &io_task->cmd_bhs->iscsi_hdr.lun)); 4918 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen); 4919 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 4920 io_task->pwrb_handle->wrb_index); 4921 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 4922 be32_to_cpu(task->cmdsn)); 4923 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 4924 io_task->psgl_handle->sgl_index); 4925 4926 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task); 4927 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 4928 io_task->pwrb_handle->nxt_wrb_index); 4929 4930 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4931 4932 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4933 doorbell |= (io_task->pwrb_handle->wrb_index & 4934 DB_DEF_PDU_WRB_INDEX_MASK) << 4935 DB_DEF_PDU_WRB_INDEX_SHIFT; 4936 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4937 iowrite32(doorbell, phba->db_va + 4938 beiscsi_conn->doorbell_offset); 4939 return 0; 4940 } 4941 4942 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg, 4943 unsigned int num_sg, unsigned int xferlen, 4944 unsigned int writedir) 4945 { 4946 4947 struct beiscsi_io_task *io_task = task->dd_data; 4948 struct iscsi_conn *conn = task->conn; 4949 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 4950 struct beiscsi_hba *phba = beiscsi_conn->phba; 4951 struct iscsi_wrb *pwrb = NULL; 4952 unsigned int doorbell = 0; 4953 4954 pwrb = io_task->pwrb_handle->pwrb; 4955 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; 4956 io_task->bhs_len = sizeof(struct be_cmd_bhs); 4957 4958 if (writedir) { 4959 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4960 INI_WR_CMD); 4961 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1); 4962 } else { 4963 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, 4964 INI_RD_CMD); 4965 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0); 4966 } 4967 4968 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb, 4969 type, pwrb); 4970 4971 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb, 4972 cpu_to_be16(*(unsigned short *) 4973 &io_task->cmd_bhs->iscsi_hdr.lun)); 4974 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen); 4975 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 4976 io_task->pwrb_handle->wrb_index); 4977 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 4978 be32_to_cpu(task->cmdsn)); 4979 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 4980 io_task->psgl_handle->sgl_index); 4981 4982 hwi_write_sgl(pwrb, sg, num_sg, io_task); 4983 4984 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 4985 io_task->pwrb_handle->nxt_wrb_index); 4986 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb)); 4987 4988 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK; 4989 doorbell |= (io_task->pwrb_handle->wrb_index & 4990 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 4991 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 4992 4993 iowrite32(doorbell, phba->db_va + 4994 beiscsi_conn->doorbell_offset); 4995 return 0; 4996 } 4997 4998 static int beiscsi_mtask(struct iscsi_task *task) 4999 { 5000 struct beiscsi_io_task *io_task = task->dd_data; 5001 struct iscsi_conn *conn = task->conn; 5002 struct beiscsi_conn *beiscsi_conn = conn->dd_data; 5003 struct beiscsi_hba *phba = beiscsi_conn->phba; 5004 struct iscsi_wrb *pwrb = NULL; 5005 unsigned int doorbell = 0; 5006 unsigned int cid; 5007 unsigned int pwrb_typeoffset = 0; 5008 5009 cid = beiscsi_conn->beiscsi_conn_cid; 5010 pwrb = io_task->pwrb_handle->pwrb; 5011 memset(pwrb, 0, sizeof(*pwrb)); 5012 5013 if (is_chip_be2_be3r(phba)) { 5014 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 5015 be32_to_cpu(task->cmdsn)); 5016 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, 5017 io_task->pwrb_handle->wrb_index); 5018 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb, 5019 io_task->psgl_handle->sgl_index); 5020 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, 5021 task->data_count); 5022 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, 5023 io_task->pwrb_handle->nxt_wrb_index); 5024 pwrb_typeoffset = BE_WRB_TYPE_OFFSET; 5025 } else { 5026 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, 5027 be32_to_cpu(task->cmdsn)); 5028 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, 5029 io_task->pwrb_handle->wrb_index); 5030 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, 5031 io_task->psgl_handle->sgl_index); 5032 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, 5033 task->data_count); 5034 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, 5035 io_task->pwrb_handle->nxt_wrb_index); 5036 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; 5037 } 5038 5039 5040 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) { 5041 case ISCSI_OP_LOGIN: 5042 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1); 5043 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 5044 hwi_write_buffer(pwrb, task); 5045 break; 5046 case ISCSI_OP_NOOP_OUT: 5047 if (task->hdr->ttt != ISCSI_RESERVED_TAG) { 5048 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 5049 if (is_chip_be2_be3r(phba)) 5050 AMAP_SET_BITS(struct amap_iscsi_wrb, 5051 dmsg, pwrb, 1); 5052 else 5053 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 5054 dmsg, pwrb, 1); 5055 } else { 5056 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); 5057 if (is_chip_be2_be3r(phba)) 5058 AMAP_SET_BITS(struct amap_iscsi_wrb, 5059 dmsg, pwrb, 0); 5060 else 5061 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, 5062 dmsg, pwrb, 0); 5063 } 5064 hwi_write_buffer(pwrb, task); 5065 break; 5066 case ISCSI_OP_TEXT: 5067 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); 5068 hwi_write_buffer(pwrb, task); 5069 break; 5070 case ISCSI_OP_SCSI_TMFUNC: 5071 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset); 5072 hwi_write_buffer(pwrb, task); 5073 break; 5074 case ISCSI_OP_LOGOUT: 5075 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset); 5076 hwi_write_buffer(pwrb, task); 5077 break; 5078 5079 default: 5080 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5081 "BM_%d : opcode =%d Not supported\n", 5082 task->hdr->opcode & ISCSI_OPCODE_MASK); 5083 5084 return -EINVAL; 5085 } 5086 5087 /* Set the task type */ 5088 io_task->wrb_type = (is_chip_be2_be3r(phba)) ? 5089 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : 5090 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); 5091 5092 doorbell |= cid & DB_WRB_POST_CID_MASK; 5093 doorbell |= (io_task->pwrb_handle->wrb_index & 5094 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT; 5095 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT; 5096 iowrite32(doorbell, phba->db_va + 5097 beiscsi_conn->doorbell_offset); 5098 return 0; 5099 } 5100 5101 static int beiscsi_task_xmit(struct iscsi_task *task) 5102 { 5103 struct beiscsi_io_task *io_task = task->dd_data; 5104 struct scsi_cmnd *sc = task->sc; 5105 struct beiscsi_hba *phba = NULL; 5106 struct scatterlist *sg; 5107 int num_sg; 5108 unsigned int writedir = 0, xferlen = 0; 5109 5110 phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba; 5111 5112 if (!sc) 5113 return beiscsi_mtask(task); 5114 5115 io_task->scsi_cmnd = sc; 5116 num_sg = scsi_dma_map(sc); 5117 if (num_sg < 0) { 5118 struct iscsi_conn *conn = task->conn; 5119 struct beiscsi_hba *phba = NULL; 5120 5121 phba = ((struct beiscsi_conn *)conn->dd_data)->phba; 5122 beiscsi_log(phba, KERN_ERR, 5123 BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI, 5124 "BM_%d : scsi_dma_map Failed " 5125 "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n", 5126 be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt), 5127 io_task->libiscsi_itt, scsi_bufflen(sc)); 5128 5129 return num_sg; 5130 } 5131 xferlen = scsi_bufflen(sc); 5132 sg = scsi_sglist(sc); 5133 if (sc->sc_data_direction == DMA_TO_DEVICE) 5134 writedir = 1; 5135 else 5136 writedir = 0; 5137 5138 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir); 5139 } 5140 5141 /** 5142 * beiscsi_bsg_request - handle bsg request from ISCSI transport 5143 * @job: job to handle 5144 */ 5145 static int beiscsi_bsg_request(struct bsg_job *job) 5146 { 5147 struct Scsi_Host *shost; 5148 struct beiscsi_hba *phba; 5149 struct iscsi_bsg_request *bsg_req = job->request; 5150 int rc = -EINVAL; 5151 unsigned int tag; 5152 struct be_dma_mem nonemb_cmd; 5153 struct be_cmd_resp_hdr *resp; 5154 struct iscsi_bsg_reply *bsg_reply = job->reply; 5155 unsigned short status, extd_status; 5156 5157 shost = iscsi_job_to_shost(job); 5158 phba = iscsi_host_priv(shost); 5159 5160 switch (bsg_req->msgcode) { 5161 case ISCSI_BSG_HST_VENDOR: 5162 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, 5163 job->request_payload.payload_len, 5164 &nonemb_cmd.dma); 5165 if (nonemb_cmd.va == NULL) { 5166 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5167 "BM_%d : Failed to allocate memory for " 5168 "beiscsi_bsg_request\n"); 5169 return -ENOMEM; 5170 } 5171 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job, 5172 &nonemb_cmd); 5173 if (!tag) { 5174 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5175 "BM_%d : MBX Tag Allocation Failed\n"); 5176 5177 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 5178 nonemb_cmd.va, nonemb_cmd.dma); 5179 return -EAGAIN; 5180 } 5181 5182 rc = wait_event_interruptible_timeout( 5183 phba->ctrl.mcc_wait[tag], 5184 phba->ctrl.mcc_numtag[tag], 5185 msecs_to_jiffies( 5186 BEISCSI_HOST_MBX_TIMEOUT)); 5187 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8; 5188 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF; 5189 free_mcc_tag(&phba->ctrl, tag); 5190 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va; 5191 sg_copy_from_buffer(job->reply_payload.sg_list, 5192 job->reply_payload.sg_cnt, 5193 nonemb_cmd.va, (resp->response_length 5194 + sizeof(*resp))); 5195 bsg_reply->reply_payload_rcv_len = resp->response_length; 5196 bsg_reply->result = status; 5197 bsg_job_done(job, bsg_reply->result, 5198 bsg_reply->reply_payload_rcv_len); 5199 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, 5200 nonemb_cmd.va, nonemb_cmd.dma); 5201 if (status || extd_status) { 5202 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5203 "BM_%d : MBX Cmd Failed" 5204 " status = %d extd_status = %d\n", 5205 status, extd_status); 5206 5207 return -EIO; 5208 } else { 5209 rc = 0; 5210 } 5211 break; 5212 5213 default: 5214 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, 5215 "BM_%d : Unsupported bsg command: 0x%x\n", 5216 bsg_req->msgcode); 5217 break; 5218 } 5219 5220 return rc; 5221 } 5222 5223 void beiscsi_hba_attrs_init(struct beiscsi_hba *phba) 5224 { 5225 /* Set the logging parameter */ 5226 beiscsi_log_enable_init(phba, beiscsi_log_enable); 5227 } 5228 5229 /* 5230 * beiscsi_quiesce()- Cleanup Driver resources 5231 * @phba: Instance Priv structure 5232 * @unload_state:i Clean or EEH unload state 5233 * 5234 * Free the OS and HW resources held by the driver 5235 **/ 5236 static void beiscsi_quiesce(struct beiscsi_hba *phba, 5237 uint32_t unload_state) 5238 { 5239 struct hwi_controller *phwi_ctrlr; 5240 struct hwi_context_memory *phwi_context; 5241 struct be_eq_obj *pbe_eq; 5242 unsigned int i, msix_vec; 5243 5244 phwi_ctrlr = phba->phwi_ctrlr; 5245 phwi_context = phwi_ctrlr->phwi_ctxt; 5246 hwi_disable_intr(phba); 5247 if (phba->msix_enabled) { 5248 for (i = 0; i <= phba->num_cpus; i++) { 5249 msix_vec = phba->msix_entries[i].vector; 5250 synchronize_irq(msix_vec); 5251 free_irq(msix_vec, &phwi_context->be_eq[i]); 5252 kfree(phba->msi_name[i]); 5253 } 5254 } else 5255 if (phba->pcidev->irq) { 5256 synchronize_irq(phba->pcidev->irq); 5257 free_irq(phba->pcidev->irq, phba); 5258 } 5259 pci_disable_msix(phba->pcidev); 5260 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task); 5261 5262 for (i = 0; i < phba->num_cpus; i++) { 5263 pbe_eq = &phwi_context->be_eq[i]; 5264 blk_iopoll_disable(&pbe_eq->iopoll); 5265 } 5266 5267 if (unload_state == BEISCSI_CLEAN_UNLOAD) { 5268 destroy_workqueue(phba->wq); 5269 beiscsi_clean_port(phba); 5270 beiscsi_free_mem(phba); 5271 5272 beiscsi_unmap_pci_function(phba); 5273 pci_free_consistent(phba->pcidev, 5274 phba->ctrl.mbox_mem_alloced.size, 5275 phba->ctrl.mbox_mem_alloced.va, 5276 phba->ctrl.mbox_mem_alloced.dma); 5277 } else { 5278 hwi_purge_eq(phba); 5279 hwi_cleanup(phba); 5280 } 5281 5282 } 5283 5284 static void beiscsi_remove(struct pci_dev *pcidev) 5285 { 5286 5287 struct beiscsi_hba *phba = NULL; 5288 5289 phba = pci_get_drvdata(pcidev); 5290 if (!phba) { 5291 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); 5292 return; 5293 } 5294 5295 beiscsi_destroy_def_ifaces(phba); 5296 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); 5297 iscsi_boot_destroy_kset(phba->boot_kset); 5298 iscsi_host_remove(phba->shost); 5299 pci_dev_put(phba->pcidev); 5300 iscsi_host_free(phba->shost); 5301 pci_disable_pcie_error_reporting(pcidev); 5302 pci_set_drvdata(pcidev, NULL); 5303 pci_release_regions(pcidev); 5304 pci_disable_device(pcidev); 5305 } 5306 5307 static void beiscsi_shutdown(struct pci_dev *pcidev) 5308 { 5309 5310 struct beiscsi_hba *phba = NULL; 5311 5312 phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); 5313 if (!phba) { 5314 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n"); 5315 return; 5316 } 5317 5318 phba->state = BE_ADAPTER_STATE_SHUTDOWN; 5319 iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); 5320 beiscsi_quiesce(phba, BEISCSI_CLEAN_UNLOAD); 5321 pci_disable_device(pcidev); 5322 } 5323 5324 static void beiscsi_msix_enable(struct beiscsi_hba *phba) 5325 { 5326 int i, status; 5327 5328 for (i = 0; i <= phba->num_cpus; i++) 5329 phba->msix_entries[i].entry = i; 5330 5331 status = pci_enable_msix_range(phba->pcidev, phba->msix_entries, 5332 phba->num_cpus + 1, phba->num_cpus + 1); 5333 if (status > 0) 5334 phba->msix_enabled = true; 5335 5336 return; 5337 } 5338 5339 static void be_eqd_update(struct beiscsi_hba *phba) 5340 { 5341 struct be_set_eqd set_eqd[MAX_CPUS]; 5342 struct be_aic_obj *aic; 5343 struct be_eq_obj *pbe_eq; 5344 struct hwi_controller *phwi_ctrlr; 5345 struct hwi_context_memory *phwi_context; 5346 int eqd, i, num = 0; 5347 ulong now; 5348 u32 pps, delta; 5349 unsigned int tag; 5350 5351 phwi_ctrlr = phba->phwi_ctrlr; 5352 phwi_context = phwi_ctrlr->phwi_ctxt; 5353 5354 for (i = 0; i <= phba->num_cpus; i++) { 5355 aic = &phba->aic_obj[i]; 5356 pbe_eq = &phwi_context->be_eq[i]; 5357 now = jiffies; 5358 if (!aic->jiffs || time_before(now, aic->jiffs) || 5359 pbe_eq->cq_count < aic->eq_prev) { 5360 aic->jiffs = now; 5361 aic->eq_prev = pbe_eq->cq_count; 5362 continue; 5363 } 5364 delta = jiffies_to_msecs(now - aic->jiffs); 5365 pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta); 5366 eqd = (pps / 1500) << 2; 5367 5368 if (eqd < 8) 5369 eqd = 0; 5370 eqd = min_t(u32, eqd, phwi_context->max_eqd); 5371 eqd = max_t(u32, eqd, phwi_context->min_eqd); 5372 5373 aic->jiffs = now; 5374 aic->eq_prev = pbe_eq->cq_count; 5375 5376 if (eqd != aic->prev_eqd) { 5377 set_eqd[num].delay_multiplier = (eqd * 65)/100; 5378 set_eqd[num].eq_id = pbe_eq->q.id; 5379 aic->prev_eqd = eqd; 5380 num++; 5381 } 5382 } 5383 if (num) { 5384 tag = be_cmd_modify_eq_delay(phba, set_eqd, num); 5385 if (tag) 5386 beiscsi_mccq_compl(phba, tag, NULL, NULL); 5387 } 5388 } 5389 5390 static void be_check_boot_session(struct beiscsi_hba *phba) 5391 { 5392 if (beiscsi_setup_boot_info(phba)) 5393 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5394 "BM_%d : Could not set up " 5395 "iSCSI boot info on async event.\n"); 5396 } 5397 5398 /* 5399 * beiscsi_hw_health_check()- Check adapter health 5400 * @work: work item to check HW health 5401 * 5402 * Check if adapter in an unrecoverable state or not. 5403 **/ 5404 static void 5405 beiscsi_hw_health_check(struct work_struct *work) 5406 { 5407 struct beiscsi_hba *phba = 5408 container_of(work, struct beiscsi_hba, 5409 beiscsi_hw_check_task.work); 5410 5411 be_eqd_update(phba); 5412 5413 if (phba->state & BE_ADAPTER_CHECK_BOOT) { 5414 if ((phba->get_boot > 0) && (!phba->boot_kset)) { 5415 phba->get_boot--; 5416 if (!(phba->get_boot % BE_GET_BOOT_TO)) 5417 be_check_boot_session(phba); 5418 } else { 5419 phba->state &= ~BE_ADAPTER_CHECK_BOOT; 5420 phba->get_boot = 0; 5421 } 5422 } 5423 5424 beiscsi_ue_detect(phba); 5425 5426 schedule_delayed_work(&phba->beiscsi_hw_check_task, 5427 msecs_to_jiffies(1000)); 5428 } 5429 5430 5431 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev, 5432 pci_channel_state_t state) 5433 { 5434 struct beiscsi_hba *phba = NULL; 5435 5436 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5437 phba->state |= BE_ADAPTER_PCI_ERR; 5438 5439 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5440 "BM_%d : EEH error detected\n"); 5441 5442 beiscsi_quiesce(phba, BEISCSI_EEH_UNLOAD); 5443 5444 if (state == pci_channel_io_perm_failure) { 5445 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5446 "BM_%d : EEH : State PERM Failure"); 5447 return PCI_ERS_RESULT_DISCONNECT; 5448 } 5449 5450 pci_disable_device(pdev); 5451 5452 /* The error could cause the FW to trigger a flash debug dump. 5453 * Resetting the card while flash dump is in progress 5454 * can cause it not to recover; wait for it to finish. 5455 * Wait only for first function as it is needed only once per 5456 * adapter. 5457 **/ 5458 if (pdev->devfn == 0) 5459 ssleep(30); 5460 5461 return PCI_ERS_RESULT_NEED_RESET; 5462 } 5463 5464 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev) 5465 { 5466 struct beiscsi_hba *phba = NULL; 5467 int status = 0; 5468 5469 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5470 5471 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5472 "BM_%d : EEH Reset\n"); 5473 5474 status = pci_enable_device(pdev); 5475 if (status) 5476 return PCI_ERS_RESULT_DISCONNECT; 5477 5478 pci_set_master(pdev); 5479 pci_set_power_state(pdev, PCI_D0); 5480 pci_restore_state(pdev); 5481 5482 /* Wait for the CHIP Reset to complete */ 5483 status = be_chk_reset_complete(phba); 5484 if (!status) { 5485 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5486 "BM_%d : EEH Reset Completed\n"); 5487 } else { 5488 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5489 "BM_%d : EEH Reset Completion Failure\n"); 5490 return PCI_ERS_RESULT_DISCONNECT; 5491 } 5492 5493 pci_cleanup_aer_uncorrect_error_status(pdev); 5494 return PCI_ERS_RESULT_RECOVERED; 5495 } 5496 5497 static void beiscsi_eeh_resume(struct pci_dev *pdev) 5498 { 5499 int ret = 0, i; 5500 struct be_eq_obj *pbe_eq; 5501 struct beiscsi_hba *phba = NULL; 5502 struct hwi_controller *phwi_ctrlr; 5503 struct hwi_context_memory *phwi_context; 5504 5505 phba = (struct beiscsi_hba *)pci_get_drvdata(pdev); 5506 pci_save_state(pdev); 5507 5508 if (enable_msix) 5509 find_num_cpus(phba); 5510 else 5511 phba->num_cpus = 1; 5512 5513 if (enable_msix) { 5514 beiscsi_msix_enable(phba); 5515 if (!phba->msix_enabled) 5516 phba->num_cpus = 1; 5517 } 5518 5519 ret = beiscsi_cmd_reset_function(phba); 5520 if (ret) { 5521 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5522 "BM_%d : Reset Failed\n"); 5523 goto ret_err; 5524 } 5525 5526 ret = be_chk_reset_complete(phba); 5527 if (ret) { 5528 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5529 "BM_%d : Failed to get out of reset.\n"); 5530 goto ret_err; 5531 } 5532 5533 beiscsi_get_params(phba); 5534 phba->shost->max_id = phba->params.cxns_per_ctrl; 5535 phba->shost->can_queue = phba->params.ios_per_ctrl; 5536 ret = hwi_init_controller(phba); 5537 5538 for (i = 0; i < MAX_MCC_CMD; i++) { 5539 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5540 phba->ctrl.mcc_tag[i] = i + 1; 5541 phba->ctrl.mcc_numtag[i + 1] = 0; 5542 phba->ctrl.mcc_tag_available++; 5543 } 5544 5545 phwi_ctrlr = phba->phwi_ctrlr; 5546 phwi_context = phwi_ctrlr->phwi_ctxt; 5547 5548 for (i = 0; i < phba->num_cpus; i++) { 5549 pbe_eq = &phwi_context->be_eq[i]; 5550 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, 5551 be_iopoll); 5552 blk_iopoll_enable(&pbe_eq->iopoll); 5553 } 5554 5555 i = (phba->msix_enabled) ? i : 0; 5556 /* Work item for MCC handling */ 5557 pbe_eq = &phwi_context->be_eq[i]; 5558 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); 5559 5560 ret = beiscsi_init_irqs(phba); 5561 if (ret < 0) { 5562 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5563 "BM_%d : beiscsi_eeh_resume - " 5564 "Failed to beiscsi_init_irqs\n"); 5565 goto ret_err; 5566 } 5567 5568 hwi_enable_intr(phba); 5569 phba->state &= ~BE_ADAPTER_PCI_ERR; 5570 5571 return; 5572 ret_err: 5573 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5574 "BM_%d : AER EEH Resume Failed\n"); 5575 } 5576 5577 static int beiscsi_dev_probe(struct pci_dev *pcidev, 5578 const struct pci_device_id *id) 5579 { 5580 struct beiscsi_hba *phba = NULL; 5581 struct hwi_controller *phwi_ctrlr; 5582 struct hwi_context_memory *phwi_context; 5583 struct be_eq_obj *pbe_eq; 5584 int ret = 0, i; 5585 5586 ret = beiscsi_enable_pci(pcidev); 5587 if (ret < 0) { 5588 dev_err(&pcidev->dev, 5589 "beiscsi_dev_probe - Failed to enable pci device\n"); 5590 return ret; 5591 } 5592 5593 phba = beiscsi_hba_alloc(pcidev); 5594 if (!phba) { 5595 dev_err(&pcidev->dev, 5596 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n"); 5597 goto disable_pci; 5598 } 5599 5600 /* Enable EEH reporting */ 5601 ret = pci_enable_pcie_error_reporting(pcidev); 5602 if (ret) 5603 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT, 5604 "BM_%d : PCIe Error Reporting " 5605 "Enabling Failed\n"); 5606 5607 pci_save_state(pcidev); 5608 5609 /* Initialize Driver configuration Paramters */ 5610 beiscsi_hba_attrs_init(phba); 5611 5612 phba->fw_timeout = false; 5613 phba->mac_addr_set = false; 5614 5615 5616 switch (pcidev->device) { 5617 case BE_DEVICE_ID1: 5618 case OC_DEVICE_ID1: 5619 case OC_DEVICE_ID2: 5620 phba->generation = BE_GEN2; 5621 phba->iotask_fn = beiscsi_iotask; 5622 break; 5623 case BE_DEVICE_ID2: 5624 case OC_DEVICE_ID3: 5625 phba->generation = BE_GEN3; 5626 phba->iotask_fn = beiscsi_iotask; 5627 break; 5628 case OC_SKH_ID1: 5629 phba->generation = BE_GEN4; 5630 phba->iotask_fn = beiscsi_iotask_v2; 5631 break; 5632 default: 5633 phba->generation = 0; 5634 } 5635 5636 ret = be_ctrl_init(phba, pcidev); 5637 if (ret) { 5638 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5639 "BM_%d : beiscsi_dev_probe-" 5640 "Failed in be_ctrl_init\n"); 5641 goto hba_free; 5642 } 5643 5644 ret = beiscsi_cmd_reset_function(phba); 5645 if (ret) { 5646 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5647 "BM_%d : Reset Failed\n"); 5648 goto hba_free; 5649 } 5650 ret = be_chk_reset_complete(phba); 5651 if (ret) { 5652 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5653 "BM_%d : Failed to get out of reset.\n"); 5654 goto hba_free; 5655 } 5656 5657 spin_lock_init(&phba->io_sgl_lock); 5658 spin_lock_init(&phba->mgmt_sgl_lock); 5659 spin_lock_init(&phba->isr_lock); 5660 spin_lock_init(&phba->async_pdu_lock); 5661 ret = mgmt_get_fw_config(&phba->ctrl, phba); 5662 if (ret != 0) { 5663 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5664 "BM_%d : Error getting fw config\n"); 5665 goto free_port; 5666 } 5667 5668 if (enable_msix) 5669 find_num_cpus(phba); 5670 else 5671 phba->num_cpus = 1; 5672 5673 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5674 "BM_%d : num_cpus = %d\n", 5675 phba->num_cpus); 5676 5677 if (enable_msix) { 5678 beiscsi_msix_enable(phba); 5679 if (!phba->msix_enabled) 5680 phba->num_cpus = 1; 5681 } 5682 5683 phba->shost->max_id = phba->params.cxns_per_ctrl; 5684 beiscsi_get_params(phba); 5685 phba->shost->can_queue = phba->params.ios_per_ctrl; 5686 ret = beiscsi_init_port(phba); 5687 if (ret < 0) { 5688 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5689 "BM_%d : beiscsi_dev_probe-" 5690 "Failed in beiscsi_init_port\n"); 5691 goto free_port; 5692 } 5693 5694 for (i = 0; i < MAX_MCC_CMD; i++) { 5695 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]); 5696 phba->ctrl.mcc_tag[i] = i + 1; 5697 phba->ctrl.mcc_numtag[i + 1] = 0; 5698 phba->ctrl.mcc_tag_available++; 5699 memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0, 5700 sizeof(struct be_dma_mem)); 5701 } 5702 5703 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0; 5704 5705 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq", 5706 phba->shost->host_no); 5707 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name); 5708 if (!phba->wq) { 5709 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5710 "BM_%d : beiscsi_dev_probe-" 5711 "Failed to allocate work queue\n"); 5712 goto free_twq; 5713 } 5714 5715 INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task, 5716 beiscsi_hw_health_check); 5717 5718 phwi_ctrlr = phba->phwi_ctrlr; 5719 phwi_context = phwi_ctrlr->phwi_ctxt; 5720 5721 for (i = 0; i < phba->num_cpus; i++) { 5722 pbe_eq = &phwi_context->be_eq[i]; 5723 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget, 5724 be_iopoll); 5725 blk_iopoll_enable(&pbe_eq->iopoll); 5726 } 5727 5728 i = (phba->msix_enabled) ? i : 0; 5729 /* Work item for MCC handling */ 5730 pbe_eq = &phwi_context->be_eq[i]; 5731 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs); 5732 5733 ret = beiscsi_init_irqs(phba); 5734 if (ret < 0) { 5735 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5736 "BM_%d : beiscsi_dev_probe-" 5737 "Failed to beiscsi_init_irqs\n"); 5738 goto free_blkenbld; 5739 } 5740 hwi_enable_intr(phba); 5741 5742 if (iscsi_host_add(phba->shost, &phba->pcidev->dev)) 5743 goto free_blkenbld; 5744 5745 if (beiscsi_setup_boot_info(phba)) 5746 /* 5747 * log error but continue, because we may not be using 5748 * iscsi boot. 5749 */ 5750 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, 5751 "BM_%d : Could not set up " 5752 "iSCSI boot info.\n"); 5753 5754 beiscsi_create_def_ifaces(phba); 5755 schedule_delayed_work(&phba->beiscsi_hw_check_task, 5756 msecs_to_jiffies(1000)); 5757 5758 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, 5759 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n"); 5760 return 0; 5761 5762 free_blkenbld: 5763 destroy_workqueue(phba->wq); 5764 for (i = 0; i < phba->num_cpus; i++) { 5765 pbe_eq = &phwi_context->be_eq[i]; 5766 blk_iopoll_disable(&pbe_eq->iopoll); 5767 } 5768 free_twq: 5769 beiscsi_clean_port(phba); 5770 beiscsi_free_mem(phba); 5771 free_port: 5772 pci_free_consistent(phba->pcidev, 5773 phba->ctrl.mbox_mem_alloced.size, 5774 phba->ctrl.mbox_mem_alloced.va, 5775 phba->ctrl.mbox_mem_alloced.dma); 5776 beiscsi_unmap_pci_function(phba); 5777 hba_free: 5778 if (phba->msix_enabled) 5779 pci_disable_msix(phba->pcidev); 5780 pci_dev_put(phba->pcidev); 5781 iscsi_host_free(phba->shost); 5782 pci_set_drvdata(pcidev, NULL); 5783 disable_pci: 5784 pci_release_regions(pcidev); 5785 pci_disable_device(pcidev); 5786 return ret; 5787 } 5788 5789 static struct pci_error_handlers beiscsi_eeh_handlers = { 5790 .error_detected = beiscsi_eeh_err_detected, 5791 .slot_reset = beiscsi_eeh_reset, 5792 .resume = beiscsi_eeh_resume, 5793 }; 5794 5795 struct iscsi_transport beiscsi_iscsi_transport = { 5796 .owner = THIS_MODULE, 5797 .name = DRV_NAME, 5798 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | 5799 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, 5800 .create_session = beiscsi_session_create, 5801 .destroy_session = beiscsi_session_destroy, 5802 .create_conn = beiscsi_conn_create, 5803 .bind_conn = beiscsi_conn_bind, 5804 .destroy_conn = iscsi_conn_teardown, 5805 .attr_is_visible = be2iscsi_attr_is_visible, 5806 .set_iface_param = be2iscsi_iface_set_param, 5807 .get_iface_param = be2iscsi_iface_get_param, 5808 .set_param = beiscsi_set_param, 5809 .get_conn_param = iscsi_conn_get_param, 5810 .get_session_param = iscsi_session_get_param, 5811 .get_host_param = beiscsi_get_host_param, 5812 .start_conn = beiscsi_conn_start, 5813 .stop_conn = iscsi_conn_stop, 5814 .send_pdu = iscsi_conn_send_pdu, 5815 .xmit_task = beiscsi_task_xmit, 5816 .cleanup_task = beiscsi_cleanup_task, 5817 .alloc_pdu = beiscsi_alloc_pdu, 5818 .parse_pdu_itt = beiscsi_parse_pdu, 5819 .get_stats = beiscsi_conn_get_stats, 5820 .get_ep_param = beiscsi_ep_get_param, 5821 .ep_connect = beiscsi_ep_connect, 5822 .ep_poll = beiscsi_ep_poll, 5823 .ep_disconnect = beiscsi_ep_disconnect, 5824 .session_recovery_timedout = iscsi_session_recovery_timedout, 5825 .bsg_request = beiscsi_bsg_request, 5826 }; 5827 5828 static struct pci_driver beiscsi_pci_driver = { 5829 .name = DRV_NAME, 5830 .probe = beiscsi_dev_probe, 5831 .remove = beiscsi_remove, 5832 .shutdown = beiscsi_shutdown, 5833 .id_table = beiscsi_pci_id_table, 5834 .err_handler = &beiscsi_eeh_handlers 5835 }; 5836 5837 5838 static int __init beiscsi_module_init(void) 5839 { 5840 int ret; 5841 5842 beiscsi_scsi_transport = 5843 iscsi_register_transport(&beiscsi_iscsi_transport); 5844 if (!beiscsi_scsi_transport) { 5845 printk(KERN_ERR 5846 "beiscsi_module_init - Unable to register beiscsi transport.\n"); 5847 return -ENOMEM; 5848 } 5849 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n", 5850 &beiscsi_iscsi_transport); 5851 5852 ret = pci_register_driver(&beiscsi_pci_driver); 5853 if (ret) { 5854 printk(KERN_ERR 5855 "beiscsi_module_init - Unable to register beiscsi pci driver.\n"); 5856 goto unregister_iscsi_transport; 5857 } 5858 return 0; 5859 5860 unregister_iscsi_transport: 5861 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5862 return ret; 5863 } 5864 5865 static void __exit beiscsi_module_exit(void) 5866 { 5867 pci_unregister_driver(&beiscsi_pci_driver); 5868 iscsi_unregister_transport(&beiscsi_iscsi_transport); 5869 } 5870 5871 module_init(beiscsi_module_init); 5872 module_exit(beiscsi_module_exit); 5873